| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
willingc/succulent-pups
|
config/settings/production.py
|
1
|
5159
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='succulent-pups <noreply@example.com>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[succulent-pups] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Your production stuff: Below this line define 3rd party library settings
|
bsd-3-clause
| 3,801,262,370,812,564,500
| 36.115108
| 117
| 0.62706
| false
| 3.74655
| true
| false
| false
|
InsightSoftwareConsortium/ITKExamples
|
src/Core/Common/ObserveAnEvent/Code.py
|
1
|
1028
|
#!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itk
Dimension = 2
PixelType = itk.UC
ImageType = itk.Image[PixelType, Dimension]
source = itk.GaussianImageSource[ImageType].New()
size = itk.Size[Dimension]()
size.Fill(128)
source.SetSize(size)
sigma = itk.FixedArray[itk.D, Dimension]()
sigma.Fill(45.0)
source.SetSigma(sigma)
def myCommand():
print("Progress: " + str(source.GetProgress()))
source.AddObserver(itk.ProgressEvent(), myCommand)
source.Update()
|
apache-2.0
| 7,537,944,320,815,879,000
| 25.358974
| 74
| 0.747082
| false
| 3.449664
| false
| false
| false
|
zasdfgbnm/tensorflow
|
tensorflow/python/estimator/util.py
|
1
|
1811
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to retrieve function args."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _is_bounded_method(fn):
_, fn = tf_decorator.unwrap(fn)
return tf_inspect.ismethod(fn) and (fn.__self__ is not None)
def _is_callable_object(obj):
return hasattr(obj, '__call__') and tf_inspect.ismethod(obj.__call__)
def fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
if isinstance(fn, functools.partial):
args = fn_args(fn.func)
args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])]
else:
if _is_callable_object(fn):
fn = fn.__call__
args = tf_inspect.getfullargspec(fn).args
if _is_bounded_method(fn):
args.remove('self')
return tuple(args)
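# Illustrative usage (editor's sketch, not part of the original module):
# `fn_args` sees through `functools.partial` and bound methods. With a
# hypothetical `model_fn(features, labels, mode)`:
#
#   fn_args(model_fn)                                  # ('features', 'labels', 'mode')
#   fn_args(functools.partial(model_fn, mode='eval'))  # ('features', 'labels')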
|
apache-2.0
| -1,776,816,529,995,017,500
| 30.224138
| 80
| 0.680287
| false
| 3.886266
| false
| false
| false
|
gkonstantyno/construct
|
construct/protocols/layer3/dhcpv4.py
|
1
|
6429
|
"""
Dynamic Host Configuration Protocol for IPv4
http://www.networksorcery.com/enp/protocol/dhcp.htm
http://www.networksorcery.com/enp/protocol/bootp/options.htm
"""
from binascii import unhexlify
from construct import *
from ipv4 import IpAddress
dhcp_option = Struct("dhcp_option",
Enum(Byte("code"),
Pad = 0,
Subnet_Mask = 1,
Time_Offset = 2,
Router = 3,
Time_Server = 4,
Name_Server = 5,
Domain_Name_Server = 6,
Log_Server = 7,
Quote_Server = 8,
LPR_Server = 9,
Impress_Server = 10,
Resource_Location_Server = 11,
Host_Name = 12,
Boot_File_Size = 13,
Merit_Dump_File = 14,
Domain_Name = 15,
Swap_Server = 16,
Root_Path = 17,
Extensions_Path = 18,
IP_Forwarding_enabledisable = 19,
Nonlocal_Source_Routing_enabledisable = 20,
Policy_Filter = 21,
Maximum_Datagram_Reassembly_Size = 22,
Default_IP_TTL = 23,
Path_MTU_Aging_Timeout = 24,
Path_MTU_Plateau_Table = 25,
Interface_MTU = 26,
All_Subnets_are_Local = 27,
Broadcast_Address = 28,
Perform_Mask_Discovery = 29,
Mask_supplier = 30,
Perform_router_discovery = 31,
Router_solicitation_address = 32,
Static_routing_table = 33,
Trailer_encapsulation = 34,
ARP_cache_timeout = 35,
Ethernet_encapsulation = 36,
Default_TCP_TTL = 37,
TCP_keepalive_interval = 38,
TCP_keepalive_garbage = 39,
Network_Information_Service_domain = 40,
Network_Information_Servers = 41,
NTP_servers = 42,
Vendor_specific_information = 43,
NetBIOS_over_TCPIP_name_server = 44,
NetBIOS_over_TCPIP_Datagram_Distribution_Server = 45,
NetBIOS_over_TCPIP_Node_Type = 46,
NetBIOS_over_TCPIP_Scope = 47,
X_Window_System_Font_Server = 48,
X_Window_System_Display_Manager = 49,
Requested_IP_Address = 50,
IP_address_lease_time = 51,
Option_overload = 52,
DHCP_message_type = 53,
Server_identifier = 54,
Parameter_request_list = 55,
Message = 56,
Maximum_DHCP_message_size = 57,
Renew_time_value = 58,
Rebinding_time_value = 59,
Class_identifier = 60,
Client_identifier = 61,
NetWareIP_Domain_Name = 62,
NetWareIP_information = 63,
Network_Information_Service_Domain = 64,
Network_Information_Service_Servers = 65,
TFTP_server_name = 66,
Bootfile_name = 67,
Mobile_IP_Home_Agent = 68,
Simple_Mail_Transport_Protocol_Server = 69,
Post_Office_Protocol_Server = 70,
Network_News_Transport_Protocol_Server = 71,
Default_World_Wide_Web_Server = 72,
Default_Finger_Server = 73,
Default_Internet_Relay_Chat_Server = 74,
StreetTalk_Server = 75,
StreetTalk_Directory_Assistance_Server = 76,
User_Class_Information = 77,
SLP_Directory_Agent = 78,
SLP_Service_Scope = 79,
Rapid_Commit = 80,
Fully_Qualified_Domain_Name = 81,
Relay_Agent_Information = 82,
Internet_Storage_Name_Service = 83,
NDS_servers = 85,
NDS_tree_name = 86,
NDS_context = 87,
BCMCS_Controller_Domain_Name_list = 88,
BCMCS_Controller_IPv4_address_list = 89,
Authentication = 90,
Client_last_transaction_time = 91,
Associated_ip = 92,
Client_System_Architecture_Type = 93,
Client_Network_Interface_Identifier = 94,
Lightweight_Directory_Access_Protocol = 95,
Client_Machine_Identifier = 97,
Open_Group_User_Authentication = 98,
Autonomous_System_Number = 109,
NetInfo_Parent_Server_Address = 112,
NetInfo_Parent_Server_Tag = 113,
URL = 114,
Auto_Configure = 116,
Name_Service_Search = 117,
Subnet_Selection = 118,
DNS_domain_search_list = 119,
SIP_Servers_DHCP_Option = 120,
Classless_Static_Route_Option = 121,
CableLabs_Client_Configuration = 122,
GeoConf = 123,
),
Switch("value", lambda ctx: ctx.code,
{
# codes without any value
"Pad" : Pass,
},
# codes followed by length and value fields
default = Struct("value",
Byte("length"),
Field("data", lambda ctx: ctx.length),
)
)
)
dhcp_header = Struct("dhcp_header",
Enum(Byte("opcode"),
BootRequest = 1,
BootReply = 2,
),
Enum(Byte("hardware_type"),
Ethernet = 1,
Experimental_Ethernet = 2,
ProNET_Token_Ring = 4,
Chaos = 5,
IEEE_802 = 6,
ARCNET = 7,
Hyperchannel = 8,
Lanstar = 9,
),
Byte("hardware_address_length"),
Byte("hop_count"),
UBInt32("transaction_id"),
UBInt16("elapsed_time"),
BitStruct("flags",
Flag("boardcast"),
Padding(15),
),
IpAddress("client_addr"),
IpAddress("your_addr"),
IpAddress("server_addr"),
IpAddress("relay_addr"),
Bytes("client_hardware_addr", 16),
Bytes("server_host_name", 64),
Bytes("boot_filename", 128),
# BOOTP/DHCP options
# "The first four bytes contain the (decimal) values 99, 130, 83 and 99"
Const("magic", b"\x63\x82\x53\x63"),
Rename("options", OptionalGreedyRange(dhcp_option)),
)
if __name__ == "__main__":
test = unhexlify(
b"0101060167c05f5a00000000"
"0102030405060708090a0b0c"
"0d0e0f10"
"DEADBEEFBEEF"
"000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000"
"00000000000000000000000000"
"63825363"
"3501083d0701DEADBEEFBEEF0c04417375733c084d53465420352e"
"30370d010f03062c2e2f1f2179f92bfc52210117566c616e333338"
"382b45746865726e6574312f302f32340206f8f0827348f9ff"
)
print(dhcp_header.parse(test))
|
mit
| -2,805,672,608,556,438,000
| 31.969231
| 76
| 0.605226
| false
| 3.671616
| false
| false
| false
|
gtalarico/pyrevitplus
|
pyRevitPlus.tab/Smart Align.panel/smartalign.stack/Lib/smartalign/distribute.py
|
1
|
3365
|
"""
Smart Align
Provides Aligning functionality for various Revit Objects.
TESTED REVIT API: 2015 | 2016
Copyright (c) 2014-2016 Gui Talarico
github.com/gtalarico | @gtalarico
This script is part of PyRevitPlus: Extensions for PyRevit
github.com/gtalarico | @gtalarico
--------------------------------------------------------
PyRevit Notice:
Copyright (c) 2014-2016 Ehsan Iran-Nejad
pyRevit: repository at https://github.com/eirannejad/pyRevit
"""
__author__ = '@gtalarico'
__version__ = '0.4.0'
import sys
import os
sys.path.append(os.path.dirname(__file__))
from Autodesk.Revit.DB import XYZ
from Autodesk.Revit.DB import Transaction
from core import logger
from core import Align
from core import PointElement, PointCollection, BoundingBoxElement
from core import get_location, get_selected_elements, move_element
from core import TOLERANCE
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
def get_division_steps(delta, qty_items):
"""ADD DOC: Move to Point Collection"""
step = abs(delta/(qty_items-1))
steps = []
for i in range(0, qty_items):
steps.append(i*step)
logger.debug('Step is: {}'.format(step))
return steps
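# Example (editor's note): get_division_steps(10.0, 5) returns
# [0.0, 2.5, 5.0, 7.5, 10.0] -- evenly spaced offsets measured from the
# first element's position along the alignment axis.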
def main(ALIGN):
""" ADD DOCS
"""
align_axis = Align.axis[ALIGN]
align_method = Align.method[ALIGN]
logger.info('Align Class: {}'.format(ALIGN))
logger.debug('Align Axis: {}'.format(align_axis))
logger.debug('Align Method: {}'.format(align_method))
elements = get_selected_elements()
point_collection = PointCollection()
for element in elements:
point_element = get_location(element, align_method)
if point_element:
point_element.element = element
point_collection.points.append(point_element)
point_collection.sort_points(align_axis)
qty_items = len(point_collection)
min_target = getattr(point_collection, 'min')
max_target = getattr(point_collection, 'max')
delta = getattr(max_target, align_axis) - getattr(min_target, align_axis)
steps = get_division_steps(delta, qty_items)
target_locations = [ getattr(min_target, align_axis) + step for step in steps]
logger.debug('Min Location Target is: {}'.format(min_target))
logger.debug('Max Location Target is: {}'.format(max_target))
logger.debug('delta is: {}'.format(str(delta)))
logger.debug('steps: {}'.format(steps))
logger.debug('target_locations: {}'.format(target_locations))
t = Transaction(doc, 'Smart Align - Distribute')
t.Start()
for point_element, target_location in zip(point_collection, target_locations):
current_location = getattr(point_element, align_axis)
delta = current_location - target_location
delta_vector = PointElement(0, 0, 0)
setattr(delta_vector, align_axis, -delta)
translation = XYZ(*delta_vector.as_tuple)
move_element(point_element.element, translation)
logger.debug('current: {}'.format(current_location))
logger.debug('target: {}'.format(target_location))
logger.debug('delta: {}'.format(delta))
logger.debug('delta_vector: {}'.format(delta_vector))
logger.debug('Translation: {}'.format(str(translation)))
logger.info('Done.')
t.Commit()
|
gpl-3.0
| -8,550,756,224,317,934,000
| 31.316832
| 82
| 0.653492
| false
| 3.606645
| false
| false
| false
|
boknilev/diacritization
|
extract_data.py
|
1
|
7629
|
__author__ = 'belinkov'
import sys
import os
import codecs
import re
from data_utils import DIACS, REGEX_DIACS
REGEX_SOLUTION_DIAC = re.compile(r'\((.+?)\)') # for gold diacritized word
class WordAnalysis(object):
"""
A simplified pos analysis from treebank pos/before-treebank files.
Attributes:
input_string (str): INPUT STRING from LDC file
lookup_word (str): LOOK-UP WORD from LDC file (if exists)
comment (str): Comment from LDC file
index (str): INDEX from LDC file
gold_solution (str): the gold * SOLUTION from LDC file
word (str): for Arabic words, same as lookup_word with diacritics removed;
for non-Arabic words, same as input_string
word_diac (str): for Arabic words, the diacritized lookup_word from gold_solution;
for non-Arabic words, same as input_string
"""
def __init__(self, input_string, comment, index, gold_solution=None, lookup_word=None):
self.input_string = input_string
self.comment = comment
self.index = index
self.gold_solution = gold_solution
self.lookup_word = lookup_word
# if this is an Arabic script word
if lookup_word:
self.word = REGEX_DIACS.sub('', lookup_word)
if gold_solution:
match = REGEX_SOLUTION_DIAC.match(gold_solution)
if not match:
sys.stderr.write('Warning: could not find diacritized solution in: ' + gold_solution + '. ' + \
'Writing lookup word as is: ' + lookup_word + '\n')
self.word_diac = lookup_word
else:
self.word_diac = match.groups()[0]
self.check_match()
# there may be no solution if the word is unknown, so just write the lookup word
else:
self.word_diac = lookup_word
# this is a non-Arabic script word
else:
# TODO consider marking as Latin words (and excluding them later)
self.word = input_string
self.word_diac = input_string
def check_match(self):
"""
Check match between word and word_diac
"""
if REGEX_DIACS.sub('', self.word_diac) != self.word:
sys.stderr.write('Warning: word ' + self.word + ' != word_diac ' + self.word_diac + \
' after removing diacritics. Attempting to correct\n')
self.unnormalize()
if REGEX_DIACS.sub('', self.word_diac) != self.word:
sys.stderr.write('Warning: could not correct, word ' + self.word + ' != word_diac ' + \
self.word_diac + '. Using undiacritized word_diac as word.\n')
self.word = REGEX_DIACS.sub('', self.word_diac)
if REGEX_DIACS.sub('', self.word_diac) != self.word:
sys.stderr.write('Warning: still word ' + self.word + ' != word_diac ' + self.word_diac + '\n')
def unnormalize(self):
"""
Try to reverse Buckwalter normalizations on diacritized word
"""
# first, remove "_" (elongation character)
self.word = self.word.replace('_', '')
self.word_diac = self.word_diac.replace('_', '')
# next, check for normalization mismatches
word_ind = 0
word_diac_ind = 0
new_word_diac = ''
while word_ind < len(self.word) and word_diac_ind < len(self.word_diac):
word_char = self.word[word_ind]
word_diac_char = self.word_diac[word_diac_ind]
if word_char == word_diac_char:
new_word_diac += word_diac_char
word_ind += 1
word_diac_ind += 1
elif word_diac_char in DIACS:
new_word_diac += word_diac_char
word_diac_ind += 1
else:
# this is probably a normalization
# print 'word_char:', word_char, 'word_diac_char:', word_diac_char
new_word_diac += word_char
word_ind += 1
word_diac_ind += 1
if word_ind == len(self.word) and word_diac_ind == len(self.word_diac) - 1:
# if we have one more char in word_diac
word_diac_char = self.word_diac[word_diac_ind]
if word_diac_char in DIACS:
new_word_diac += word_diac_char
self.word_diac = new_word_diac
def process_treebank_file(treebank_filename, output_file, output_file_diac):
"""
Extract data from a treebank file
:param treebank_filename: pos/before-treebank file
:param output_file: file to write words without diacritics
:param output_file_diac: file to write words with diacritics
:return:
"""
print 'extracting data from file:', treebank_filename
f = codecs.open(treebank_filename, encoding='utf8')
input_string, comment, index, gold_solution, lookup_word = ['']*5
prev_index = '' # keep track of previous index
for line in f:
if line.strip() == '':
if input_string == '':
continue
word_analysis = WordAnalysis(input_string, comment, index, gold_solution, lookup_word)
# check for a new paragraph
if prev_index.startswith('P') and index.startswith('P') and not prev_index.startswith(index.split('W')[0]):
output_file.write('\n')
output_file_diac.write('\n')
output_file.write(word_analysis.word + '\n')
output_file_diac.write(word_analysis.word_diac + '\n')
prev_index = index
input_string, comment, index, gold_solution, lookup_word = ['']*5
else:
splt = line.strip().split(':', 1)
if len(splt) != 2:
sys.stderr.write('Warning: could not split line on :, in: ' + line + '\n')
continue
field_name, field_val = splt[0].strip(), splt[1].strip()
if field_name == 'INPUT STRING':
input_string = field_val
elif field_name == 'LOOK-UP WORD':
lookup_word = field_val
elif field_name == 'Comment':
comment = field_val
elif field_name == 'INDEX':
index = field_val
elif field_name.startswith('* SOLUTION'):
gold_solution = field_val
elif field_name.startswith('SOLUTION') or field_name == '(GLOSS)':
continue
else:
sys.stderr.write('Warning: unknown field: ' + field_name + '\n')
f.close()
def process_dir(treebank_dir, output_filename, output_filename_diac):
"""
Extract data from a treebank dir
:param treebank_dir: pos/before-treebank directory
:param output_file: file to write words without diacritics
:param output_file_diac: file to write words with diacritics
:return:
"""
print 'processing treebank dir:', treebank_dir
g = codecs.open(output_filename, 'w', encoding='utf8')
g_diac = codecs.open(output_filename_diac, 'w', encoding='utf8')
for f in os.listdir(treebank_dir):
process_treebank_file(treebank_dir + '/' + f, g, g_diac)
g.close()
g_diac.close()
print 'written words to:', output_filename
print 'written diacritized words to:', output_filename_diac
if __name__ == '__main__':
if len(sys.argv) == 4:
process_dir(sys.argv[1], sys.argv[2], sys.argv[3])
else:
print 'USAGE: python ' + sys.argv[0] + ' <treebank dir> <output word file> <output diacritized word file>'
|
mit
| 4,356,671,307,383,967,000
| 38.528497
| 119
| 0.565998
| false
| 3.703398
| false
| false
| false
|
transientlunatic/minke
|
minke/antenna.py
|
1
|
3841
|
"""
This module provides functions to calculate antenna factors for a given time, a given sky location and a given detector.
Adapted from the implementation in pylal
"""
import sys
from math import *
import lal
import lalsimulation
__author__ = "Alexander Dietz <Alexander.Dietz@astro.cf.ac.uk>; Daniel Williams <daniel.williams@ligo.org>"
def response( gpsTime, rightAscension, declination, inclination,
polarization, unit, det ):
"""
response( gpsTime, rightAscension, declination, inclination,
polarization, unit, detector )
Calculates the antenna factors for a detector 'detector' (e.g. 'H1')
at a given gps time (as integer) for a given sky location
(rightAscension, declination) in some unit (degree/radians).
This computation also takes into account a specific inclination
and polarization.
The returned values are: (f-plus, f-cross, f-average, q-value).
Example: antenna.response( 854378604.780, 11.089, 42.308, 0, 0, 'radians', 'H1' )
"""
# check the input arguments
if unit =='radians':
ra_rad = rightAscension
de_rad = declination
psi_rad = polarization
iota_rad = inclination
elif unit =='degree':
ra_rad = rightAscension/180.0*pi
de_rad = declination/180.0*pi
psi_rad = polarization/180.0*pi
iota_rad = inclination/180.0*pi
else:
raise ValueError("Unknown unit %s" % unit)
# calculate GMST for the GPS time
gps = lal.LIGOTimeGPS( gpsTime )
gmst_rad = lal.GreenwichMeanSiderealTime(gps)
# Get the detector from its prefix
try:
detector = lalsimulation.DetectorPrefixToLALDetector(det)
except KeyError:
raise ValueError("ERROR. Key %s is not a valid detector prefix." % (det))
# get the correct response data
response = detector.response
# actual computation of antenna factors
f_plus, f_cross = lal.ComputeDetAMResponse(response, ra_rad, de_rad,
psi_rad, gmst_rad)
f_ave = sqrt((f_plus*f_plus + f_cross*f_cross)/2.0)
ci = cos(iota_rad)
cc = ci*ci
# calculate q-value, i.e. the ratio of effective to real distance
# ref: Duncans PhD, eq. (4.3) on page 57
f_q = sqrt(f_plus*f_plus*(1+cc)*(1+cc)/4.0 + f_cross*f_cross*cc)
# output
return f_plus, f_cross, f_ave, f_q
def timeDelay( gpsTime, rightAscension, declination, unit, det1, det2 ):
"""
timeDelay( gpsTime, rightAscension, declination, unit, det1, det2 )
Calculates the time delay in seconds between the detectors
'det1' and 'det2' (e.g. 'H1') for a sky location at (rightAscension
and declination) which must be given in certain units
('radians' or 'degree'). The time is passed as GPS time.
A positive time delay means the GW arrives first at 'det2', then at 'det1'.
Example:
antenna.timeDelay( 877320548.000, 355.084,31.757, 'degree','H1','L1')
0.0011604683260994519
Given these values, the signal arrives first at detector L1,
and 1.16 ms later at H1.
"""
# check the input arguments
if unit =='radians':
ra_rad = rightAscension
de_rad = declination
elif unit =='degree':
ra_rad = rightAscension/180.0*pi
de_rad = declination/180.0*pi
else:
raise ValueError("Unknown unit %s" % unit)
# check input values
if ra_rad<0.0 or ra_rad> 2*pi:
raise ValueError( "ERROR. right ascension=%f "\
"not within reasonable range."\
% (rightAscension))
if de_rad<-pi or de_rad> pi:
raise ValueError( "ERROR. declination=%f not within reasonable range."\
% (declination))
if det1 == det2:
return 0.0
gps = lal.LIGOTimeGPS( gpsTime )
x1 = lalsimulation.DetectorPrefixToLALDetector(det1).location
x2 = lalsimulation.DetectorPrefixToLALDetector(det2).location
timedelay = lal.ArrivalTimeDiff(list(x1), list(x2), ra_rad, de_rad, gps)
return timedelay
|
isc
| 5,938,514,258,122,833,000
| 30.743802
| 120
| 0.67899
| false
| 3.163921
| false
| false
| false
|
20tab/twentytab-seo
|
seo/template_context/context_processors.py
|
1
|
2841
|
from seo.models import MetaPage, MetaSite
class MetaContent(object):
"""
MetaContent defines an object that holds information about a page or
publication. This information is included in the template.
"""
def __init__(self):
self.title = ""
self.description = ""
self.keywords = ""
self.author = ""
self.content_type = ""
self.robots = ""
self.generator = ""
self.html_head = ""
def fill_content(self, metaObject):
"""
This method fills MetaContent with the information contained in metaObject.
"""
self.title = metaObject.title
self.description = metaObject.description
self.keywords = metaObject.keywords
self.author = metaObject.author
self.content_type = metaObject.content_type
self.robots = metaObject.robots
self.html_head = metaObject.html_head
try:  # because Page has no generator attribute
self.generator = metaObject.generator
except AttributeError:
self.generator = ''
def check_attr(self, item):
"""
Check whether item is defined and non-empty on this object.
"""
if hasattr(self, item):
if not getattr(self, item) or getattr(self, item) == "":
return False
return True
def jsonToMeta(self, json):
"""
Set every item from a JSON dict as an attribute on self.
"""
for k, v in json.items():
setattr(self, k, v)
def get_fields(self):
"""
It returns this object as a dictionary
"""
return self.__dict__
def __str__(self):
return "%s" % self.title
def set_meta(request):
"""
This context processor returns meta information contained in cached files.
If there is no cache, it computes the dictionary to return.
"""
context_extras = {}
if not request.is_ajax() and hasattr(request, 'upy_context') and request.upy_context['PAGE']:
try:
site = MetaSite.objects.get(default=True)
except MetaSite.DoesNotExist:
site = None
try:
page = MetaPage.objects.get(page=request.upy_context['PAGE'])
except MetaPage.DoesNotExist:
page = None
meta_temp = MetaContent()
attr_list = ('title', 'description', 'keywords', 'author', 'content_type', 'robots', 'generator', 'html_head')
if page:
for item in attr_list:
if hasattr(page, item):
setattr(meta_temp, item, getattr(page, item, ""))
if site:
for item in attr_list:
if hasattr(site, item) and not meta_temp.check_attr(item):
setattr(meta_temp, item, getattr(site, item, ""))
context_extras['META'] = meta_temp
return context_extras
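# Usage note (editor's sketch, assumed wiring): add
# 'seo.template_context.context_processors.set_meta' to the template context
# processors in Django settings; templates can then read {{ META.title }},
# {{ META.description }}, and so on. Page-level values take precedence, and
# site defaults only fill in attributes the page left empty.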
|
mit
| -8,941,388,902,364,435,000
| 31.284091
| 118
| 0.571278
| false
| 4.350689
| false
| false
| false
|
citrix-openstack-build/python-saharaclient
|
saharaclient/openstack/common/importutils.py
|
1
|
2367
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
try:
return getattr(sys.modules[mod_str], class_str)
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
"""Import a class and return an instance of it."""
return import_class(import_str)(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
Imports a class and returns an instance of it, first by trying
to find the class in a default namespace, then falling back to
a full path if not found in the default namespace.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
def import_versioned_module(version, submodule=None):
module = 'saharaclient.v%s' % version
if submodule:
module = '.'.join((module, submodule))
return import_module(module)
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
return import_module(import_str)
except ImportError:
return default
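if __name__ == '__main__':
    # Illustrative self-test (editor's sketch) using only stdlib names.
    ordered_dict_cls = import_class('collections.OrderedDict')
    instance = import_object('collections.OrderedDict', a=1)
    module = import_module('collections')
    fallback = try_import('no_such_module', default=None)
    print(ordered_dict_cls, instance, module.__name__, fallback)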
|
apache-2.0
| 5,951,684,899,121,669,000
| 31.424658
| 78
| 0.667934
| false
| 4.005076
| false
| false
| false
|
rfyiamcool/TimeCost
|
timecost.py
|
1
|
1310
|
import time
import functools
class TimeCost(object):
def __init__(self, unit='s', precision=4, logger=None):
self.start = None
self.end = None
self.total = 0
self.unit = unit
self.precision = precision
self.__unitfactor = {'s': 1,
'ms': 1000,
'us': 1000000}
self.logger = logger
def __call__(self, f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
with self:
return f(*args, **kwargs)
return wrapped
def __enter__(self):
if self.unit not in self.__unitfactor:
raise KeyError('Unsupported time unit.')
if self.precision < 0:
raise KeyError('precision must be >= 0')
self.start = time.time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.end = time.time()
self.total = (self.end - self.start) * self.__unitfactor[self.unit]
if self.precision != 0:
self.total = round(self.total, self.precision)
else:
self.total = int(self.total)
if self.logger:
self.logger.info('this cost {0}{1}'.format(self.total, self.unit))
def __str__(self):
return 'this cost {0}{1}'.format(self.total, self.unit)
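if __name__ == '__main__':
    # Illustrative usage (editor's sketch): TimeCost works both as a
    # context manager and as a decorator.
    with TimeCost(unit='ms', precision=2) as tc:
        sum(range(1000000))
    print(tc)  # e.g. "this cost 12.34ms"

    timer = TimeCost(unit='us')

    @timer
    def busy():
        return sum(range(100000))

    busy()
    print(timer)  # the decorating instance records the last call's cost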
|
mit
| 9,133,844,000,565,606,000
| 29.465116
| 78
| 0.529008
| false
| 3.89881
| false
| false
| false
|
ahartoto/lendingclub2
|
lendingclub2/request.py
|
1
|
2731
|
# Filename: request.py
"""
LendingClub2 Request Module
Interface functions:
get
post
"""
# Standard libraries
import datetime
import time
# Requests
import requests
# Lending Club
from lendingclub2.authorization import Authorization
from lendingclub2.config import REQUEST_LIMIT_PER_SEC
from lendingclub2.error import LCError
__LAST_REQUEST_TIMESTAMP = None
# pylint: disable=global-statement
def get(*args, **kwargs):
"""
Wrapper around :py:func:`requests.get` function.
:param args: tuple - positional arguments for :py:func:`requests.get`.
:param kwargs: dict - keyword arguments for :py:func:`requests.get`.
:returns: instance of :py:class:`requests.Response`.
"""
global __LAST_REQUEST_TIMESTAMP
__add_headers_to_kwargs(kwargs)
__wait_request()
try:
response = requests.get(*args, **kwargs)
__LAST_REQUEST_TIMESTAMP = datetime.datetime.now()
return response
except requests.ConnectionError as exc:
fstr = "Cannot connect correctly"
raise LCError(fstr, details=str(exc))
# pylint: enable=global-statement
# pylint: disable=global-statement
def post(*args, **kwargs):
"""
Wrapper around :py:func:`requests.post` function.
:param args: tuple - positional arguments for :py:func:`requests.post`.
:param kwargs: dict - keyword arguments for :py:func:`requests.post`.
:returns: instance of :py:class:`requests.Response`.
"""
global __LAST_REQUEST_TIMESTAMP
__add_headers_to_kwargs(kwargs)
__wait_request()
try:
response = requests.post(*args, **kwargs)
__LAST_REQUEST_TIMESTAMP = datetime.datetime.now()
return response
except requests.ConnectionError as exc:
fstr = "Cannot connect correctly"
raise LCError(fstr, details=str(exc))
# pylint: enable=global-statement
# Internal functions
def __add_headers_to_kwargs(kwargs):
"""
Add authorization key to the headers in keyword arguments.
:param kwargs: dict
"""
auth = Authorization()
if 'headers' in kwargs:
for key, value in auth.header.items():
kwargs['headers'][key] = value
else:
kwargs['headers'] = auth.header
def __wait_request():
"""
Ensure that we do not exceed the allowed request rate by spacing
requests at least 1 / REQUEST_LIMIT_PER_SEC seconds apart.
"""
if __LAST_REQUEST_TIMESTAMP is None:
return
now = datetime.datetime.now()
delta = now - __LAST_REQUEST_TIMESTAMP
total_seconds = delta.total_seconds()
wait_time_between_requests = 1.0 / REQUEST_LIMIT_PER_SEC
if total_seconds < wait_time_between_requests:
wait_time = wait_time_between_requests - total_seconds
time.sleep(wait_time)
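# Pacing example (editor's note): with REQUEST_LIMIT_PER_SEC == 2 the minimum
# spacing is 1.0 / 2 = 0.5s, so a request issued 0.2s after the previous one
# sleeps for the remaining 0.3s before being sent.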
|
mit
| -9,145,021,347,564,711,000
| 26.585859
| 75
| 0.671183
| false
| 3.901429
| false
| false
| false
|
zyga/arrowhead
|
xkcd518.py
|
1
|
3193
|
#!/usr/bin/env python3
from arrowhead import Flow, step, arrow, main
def ask(prompt):
answer = None
while answer not in ('yes', 'no'):
answer = input(prompt + ' ')
return answer
class XKCD518(Flow):
"""
https://xkcd.com/518/
"""
@step(initial=True, level=1)
@arrow('do_you_understand_flowcharts')
def start(step):
"""
START
"""
print(step.Meta.label)
# ---------------
@step(level=2)
@arrow(to='good', value='yes')
@arrow(to='okay_you_see_the_line_labeled_yes', value='no')
def do_you_understand_flowcharts(step):
"""
Do you understand flowcharts?
"""
return ask(step.Meta.label)
@step(level=2)
@arrow(to='lets_go_drink')
def good(step):
print(step.Meta.label)
# ---------------
@step(level=3)
@arrow(to='hey_I_should_try_installing_freebsd')
def lets_go_drink(step):
"""
Let's go drink.
"""
print(step.Meta.label)
@step(accepting=True, level=3)
def hey_I_should_try_installing_freebsd(step):
"""
Hey, I should try installing freeBSD!
"""
print(step.Meta.label)
# ---------------
@step(level=4)
@arrow(to='and_you_can_see_ones_labeled_no', value='yes')
@arrow(to='but_you_see_the_ones_labeled_no', value='no')
def okay_you_see_the_line_labeled_yes(step):
"""
Okay. You see the line labeled 'yes'?
"""
return ask(step.Meta.label)
@step(level=4)
@arrow(to='good', value='yes')
@arrow(to='but_you_just_followed_them_twice', value='no')
def and_you_can_see_ones_labeled_no(step):
"""
...and you can see the ones labeled 'no'?
"""
return ask(step.Meta.label)
# ---------------
@step(level=5)
@arrow(to='wait_what', value='yes')
@arrow(to='listen', value='no')
def but_you_see_the_ones_labeled_no(step):
"""
But you see the ones labeled "no"?
"""
return ask(step.Meta.label)
# ---------------
@step(accepting=True, level=5)
def wait_what(step):
"""
Wait, what!
"""
print(step.Meta.label)
# ---------------
@step(level=6)
@arrow(to='I_hate_you')
def listen(step):
"""
Listen
"""
print(step.Meta.label)
@step(accepting=True, level=6)
def I_hate_you(step):
"""
I hate you
"""
print(step.Meta.label)
# ---------------
@step(level=5)
@arrow(to='that_wasnt_a_question', value='yes')
@arrow(to='that_wasnt_a_question', value='no')
def but_you_just_followed_them_twice(step):
"""
But you just followed them twice!
"""
return ask(step.Meta.label)
@step(level=5)
@arrow(to='screw_it')
def that_wasnt_a_question(step):
"""
(That wasn't a question)
"""
print(step.Meta.label)
@step(level=4)
@arrow(to='lets_go_drink')
def screw_it(step):
"""
Screw it.
"""
print(step.Meta.label)
if __name__ == '__main__':
main(XKCD518)
|
bsd-3-clause
| -9,054,405,839,265,277,000
| 21.173611
| 62
| 0.509239
| false
| 3.258163
| false
| false
| false
|
EconForge/dolo
|
dolo/linter.py
|
1
|
17659
|
# import ast
# import json
# import ruamel.yaml as ry
# from ruamel.yaml.comments import CommentedSeq
# from dolo.compiler.symbolic import check_expression
# from dolo.compiler.recipes import recipes
# from dolo.misc.termcolor import colored
# class Compare:
# def __init__(self):
# self.d = {}
# def compare(self, A, B):
# if isinstance(A, ast.Name) and (A.id[0] == '_'):
# if A.id not in self.d:
# self.d[A.id] = B
# return True
# else:
# return self.compare(self.d[A.id], B)
# if not (A.__class__ == B.__class__):
# return False
# if isinstance(A, ast.Name):
# return A.id == B.id
# elif isinstance(A, ast.Call):
# if not self.compare(A.func, B.func):
# return False
# if not len(A.args) == len(B.args):
# return False
# for i in range(len(A.args)):
# if not self.compare(A.args[i], B.args[i]):
# return False
# return True
# elif isinstance(A, ast.Num):
# return A.n == B.n
# elif isinstance(A, ast.Expr):
# return self.compare(A.value, B.value)
# elif isinstance(A, ast.Module):
# if not len(A.body) == len(B.body):
# return False
# for i in range(len(A.body)):
# if not self.compare(A.body[i], B.body[i]):
# return False
# return True
# elif isinstance(A, ast.BinOp):
# if not isinstance(A.op, B.op.__class__):
# return False
# if not self.compare(A.left, B.left):
# return False
# if not self.compare(A.right, B.right):
# return False
# return True
# elif isinstance(A, ast.UnaryOp):
# if not isinstance(A.op, B.op.__class__):
# return False
# return self.compare(A.operand, B.operand)
# elif isinstance(A, ast.Subscript):
# if not self.compare(A.value, B.value):
# return False
# return self.compare(A.slice, B.slice)
# elif isinstance(A, ast.Index):
# return self.compare(A.value, B.value)
# elif isinstance(A, ast.Compare):
# if not self.compare(A.left, B.left):
# return False
# if not len(A.ops) == len(B.ops):
# return False
# for i in range(len(A.ops)):
# if not self.compare(A.ops[i], B.ops[i]):
# return False
# if not len(A.comparators) == len(B.comparators):
# return False
# for i in range(len(A.comparators)):
# if not self.compare(A.comparators[i], B.comparators[i]):
# return False
# return True
# elif isinstance(A, ast.In):
# return True
# elif isinstance(A, (ast.Eq, ast.LtE)):
# return True
# else:
# print(A.__class__)
# raise Exception("Not implemented")
# def compare_strings(a, b):
# t1 = ast.parse(a)
# t2 = ast.parse(b)
# comp = Compare()
# val = comp.compare(t1, t2)
# return val
# def match(m, s):
# if isinstance(m, str):
# m = ast.parse(m).body[0].value
# if isinstance(s, str):
# s = ast.parse(s).body[0].value
# comp = Compare()
# val = comp.compare(m, s)
# d = comp.d
# if len(d) == 0:
# return val
# else:
# return d
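# # Example (editor's note): names starting with "_" act as wildcards, so
# # match("_x + 1", "y + 1") binds {"_x": <ast.Name 'y'>}, while
# # match("y + 1", "y + 1") returns True because nothing is bound.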
# known_symbol_types = {
# 'dtcc': recipes['dtcc']['symbols'],
# }
# class ModelException(Exception):
# type = 'error'
# def check_symbol_validity(s):
# import ast
# val = ast.parse(s).body[0].value
# assert (isinstance(val, ast.Name))
# def check_symbols(data):
# # can raise three types of exceptions
# # - unknown symbol
# # - invalid symbol
# # - already declared
# # add: not declared if missing 'states', 'controls' ?
# exceptions = []
# symbols = data['symbols']
# cm_symbols = symbols
# model_type = 'dtcc'
# already_declared = {} # symbol: symbol_type, position
# for key, values in cm_symbols.items():
# # (start_line, start_column, end_line, end_column) of the key
# if key not in known_symbol_types[model_type]:
# l0, c0, l1, c1 = cm_symbols.lc.data[key]
# exc = ModelException(
# "Unknown symbol type '{}'".format(
# key, model_type))
# exc.pos = (l0, c0, l1, c1)
# # print(l0,c0,l1,c1)
# exceptions.append(exc)
# assert (isinstance(values, CommentedSeq))
# for i, v in enumerate(values):
# (l0, c0) = values.lc.data[i]
# length = len(v)
# l1 = l0
# c1 = c0 + length
# try:
# check_symbol_validity(v)
# except:
# exc = ModelException("Invalid symbol '{}'".format(v))
# exc.pos = (l0, c0, l1, c1)
# exceptions.append(exc)
# if v in already_declared:
# ll = already_declared[v]
# exc = ModelException(
# "Symbol '{}' already declared as '{}'. (pos {})".format(
# v, ll[0], (ll[1][0] + 1, ll[1][1])))
# exc.pos = (l0, c0, l1, c1)
# exceptions.append(exc)
# else:
# already_declared[v] = (key, (l0, c0))
# return exceptions
# def check_equations(data):
# model_type = data['model_type']
# pos0 = data.lc.data['equations']
# equations = data['equations']
# exceptions = []
# recipe = recipes[model_type]
# specs = recipe['specs']
# for eq_type in specs.keys():
# if (eq_type not in equations) and (not specs[eq_type].get(
# 'optional', True)):
# exc = ModelException("Missing equation type {}.".format(eq_type))
# exc.pos = pos0
# exceptions.append(exc)
# already_declared = {}
# unknown = []
# for eq_type in equations.keys():
# pos = equations.lc.data[eq_type]
# if eq_type not in specs:
# exc = ModelException("Unknown equation type {}.".format(eq_type))
# exc.pos = pos
# exceptions.append(exc)
# unknown.append(eq_type)
# # BUG: doesn't produce an error when a block is declared twice
# # should be raised by ruaml.yaml ?
# elif eq_type in already_declared.keys():
# exc = ModelException(
# "Equation type {} declared twice at ({})".format(eq_type, pos))
# exc.pos = pos
# exceptions.append(exc)
# else:
# already_declared[eq_type] = pos
# for eq_type in [k for k in equations.keys() if k not in unknown]:
# for n, eq in enumerate(equations[eq_type]):
# eq = eq.replace('<=', '<').replace('==',
# '=').replace('=', '==').replace(
# '<', '<=')
# # print(eq)
# pos = equations[eq_type].lc.data[n]
# try:
# ast.parse(eq)
# except SyntaxError as e:
# exc = ModelException("Syntax Error.")
# exc.pos = [
# pos[0], pos[1] + e.offset, pos[0], pos[1] + e.offset
# ]
# exceptions.append(exc)
# # TEMP: incorrect ordering
# if specs[eq_type].get('target'):
# for n, eq in enumerate(equations[eq_type]):
# eq = eq.replace('<=', '<').replace('==', '=').replace(
# '=', '==').replace('<', '<=')
# pos = equations[eq_type].lc.data[n]
# lhs_name = str.split(eq, '=')[0].strip()
# target = specs[eq_type]['target'][0]
# if lhs_name not in data['symbols'][target]:
# exc = ModelException(
# "Undeclared assignement target '{}'. Add it to '{}'.".
# format(lhs_name, target))
# exc.pos = [pos[0], pos[1], pos[0], pos[1] + len(lhs_name)]
# exceptions.append(exc)
# # if n>len(data['symbols'][target]):
# else:
# right_name = data['symbols'][target][n]
# if lhs_name != right_name:
# exc = ModelException(
# "Left hand side should be '{}' instead of '{}'.".
# format(right_name, lhs_name))
# exc.pos = [
# pos[0], pos[1], pos[0], pos[1] + len(lhs_name)
# ]
# exceptions.append(exc)
# # temp
# return exceptions
# def check_definitions(data):
# if 'definitions' not in data:
# return []
# definitions = data['definitions']
# if definitions is None:
# return []
# exceptions = []
# known_symbols = sum([[*v] for v in data['symbols'].values()], [])
# allowed_symbols = {v: (0, ) for v in known_symbols} # TEMP
# for p in data['symbols']['parameters']:
# allowed_symbols[p] = (0, )
# new_definitions = dict()
# for k, v in definitions.items():
# pos = definitions.lc.data[k]
# if k in known_symbols:
# exc = ModelException(
# 'Symbol {} has already been defined as a model symbol.'.format(
# k))
# exc.pos = pos
# exceptions.append(exc)
# continue
# if k in new_definitions:
# exc = ModelException(
# 'Symbol {} cannot be defined twice.'.format(k))
# exc.pos = pos
# exceptions.append(exc)
# continue
# try:
# check_symbol_validity(k)
# except:
# exc = ModelException("Invalid symbol '{}'".format(k))
# exc.pos = pos
# exceptions.append(exc)
# # pos = equations[eq_type].lc.data[n]
# try:
# expr = ast.parse(str(v))
# # print(allowed_symbols)
# check = check_expression(expr, allowed_symbols)
# # print(check['problems'])
# for pb in check['problems']:
# name, t, offset, err_type = [pb[0], pb[1], pb[2], pb[3]]
# if err_type == 'timing_error':
# exc = Exception(
# 'Timing for variable {} could not be determined.'.
# format(pb[0]))
# elif err_type == 'incorrect_timing':
# exc = Exception(
# 'Variable {} cannot have time {}. (Allowed: {})'.
# format(name, t, pb[4]))
# elif err_type == 'unknown_function':
# exc = Exception(
# 'Unknown variable/function {}.'.format(name))
# elif err_type == 'unknown_variable':
# exc = Exception(
# 'Unknown variable/parameter {}.'.format(name))
# else:
# print(err_type)
# exc.pos = (pos[0], pos[1] + offset, pos[0],
# pos[1] + offset + len(name))
# exc.type = 'error'
# exceptions.append(exc)
# new_definitions[k] = v
# allowed_symbols[k] = (0, ) # TEMP
# # allowed_symbols[k] = None
# except SyntaxError as e:
# pp = pos # TODO: find right mark for pp
# exc = ModelException("Syntax Error.")
# exc.pos = [pp[0], pp[1] + e.offset, pp[0], pp[1] + e.offset]
# exceptions.append(exc)
# return exceptions
# def check_calibration(data):
# # what happens here if symbols are not clean ?
# symbols = data['symbols']
# pos0 = data.lc.data['calibration']
# calibration = data['calibration']
# exceptions = []
# all_symbols = []
# for v in symbols.values():
# all_symbols += v
# for s in all_symbols:
# if (s not in calibration.keys()) and (s not in symbols["exogenous"]):
# # should skip invalid symbols there
# exc = ModelException(
# "Symbol {} has no calibrated value.".format(s))
# exc.pos = pos0
# exc.type = 'warning'
# exceptions.append(exc)
# for s in calibration.keys():
# val = str(calibration[s])
# try:
# ast.parse(val)
# except SyntaxError as e:
# pos = calibration.lc.data[s]
# exc = ModelException("Syntax Error.")
# exc.pos = [pos[0], pos[1] + e.offset, pos[0], pos[1] + e.offset]
# exceptions.append(exc)
# return exceptions
# def check_all(data):
# def serious(exsc):
# return ('error' in [e.type for e in exsc])
# exceptions = check_infos(data)
# if serious(exceptions):
# return exceptions
# exceptions = check_symbols(data)
# if serious(exceptions):
# return exceptions
# exceptions += check_definitions(data)
# if serious(exceptions):
# return exceptions
# exceptions += check_equations(data)
# if serious(exceptions):
# return exceptions
# exceptions += check_calibration(data)
# if serious(exceptions):
# return exceptions
# return exceptions
# def human_format(err):
# err_type = err['type']
# err_type = colored(
# err_type, color=('red' if err_type == 'error' else 'yellow'))
# err_range = str([e + 1 for e in err['range'][0]])[1:-1]
# return '{:7}: {:6}: {}'.format(err_type, err_range, err['text'])
# def check_infos(data):
# exceptions = []
# if 'model_type' in data:
# model_type = data['model_type']
# if model_type not in ['dtcc', 'dtmscc', 'dtcscc', 'dynare']:
# exc = ModelException('Unknown model type: {}.'.format(
# str(model_type)))
# exc.pos = data.lc.data['model_type']
# exc.type = 'error'
# exceptions.append(exc)
# else:
# model_type = 'dtcc'
# data['model_type'] = 'dtcc'
# # exc = ModelException("Missing field: 'model_type'.")
# # exc.pos = (0,0,0,0)
# # exc.type='error'
# # exceptions.append(exc)
# if 'name' not in data:
# exc = ModelException("Missing field: 'name'.")
# exc.pos = (0, 0, 0, 0)
# exc.type = 'warning'
# exceptions.append(exc)
# return exceptions
# def lint(txt, source='<string>', format='human', catch_exception=False):
# # raise ModelException if it doesn't work correctly
# if isinstance(txt, str):
# try:
# data = ry.load(txt, ry.RoundTripLoader)
# except Exception as exc:
# if not catch_exception:
# raise exc
# return [] # should return parse error
# else:
# # txt is then assumed to be a ruamel structure
# data = txt
# if not ('symbols' in data or 'equations' in data or 'calibration' in data):
# # this is probably not a yaml filename
# output = []
# else:
# try:
# exceptions = check_all(data)
# except Exception as e:
# if not catch_exception:
# raise(e)
# exc = ModelException("Linter Error: Uncaught Exception.")
# exc.pos = [0, 0, 0, 0]
# exc.type = 'error'
# exceptions = [exc]
# output = []
# for k in exceptions:
# try:
# err_type = k.type
# except:
# err_type = 'error'
# output.append({
# 'type':
# err_type,
# 'source':
# source,
# 'range': ((k.pos[0], k.pos[1]), (k.pos[2], k.pos[3])),
# 'text':
# k.args[0]
# })
# if format == 'json':
# return (json.dumps(output))
# elif format == 'human':
# return (str.join("\n", [human_format(e) for e in output]))
# elif not format:
# return output
# else:
# raise ModelException("Unkown format {}.".format(format))
# TODO:
# - check name (already defined by somebody else?)
# - description: ?
# - calibration:
# - incorrect key
# - warning if not a known symbol ?
# - not a recognized identifier
# - defined twice
# - impossible to solve in closed form (depends on ...)
# - incorrect equation
# - grammatically incorrect
# - contains timed variables
# - warnings:
# - missing values
# - equations: symbols already known (beware of speed issues)
# - unknown group of equations
# - incorrect syntax
# - undeclared variable (and not a function)
# - indexed parameter
# - incorrect order
# - incorrect complementarities
# - incorrect recipe: unexpected symbol type
# - nonzero residuals (warning, to be done without compiling)
# - options: if present
# - approximation_space:
# - inconsistent boundaries
# - must equal number of states
# - distribution:
# - same size as shocks
|
bsd-2-clause
| -5,633,091,538,059,207,000
| 33.693517
| 81
| 0.482474
| false
| 3.522641
| false
| false
| false
|
smurfix/HomEvenT
|
irrigation/rainman/migrations/0031_auto.py
|
1
|
20462
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field xdays on 'Group'
db.create_table('rainman_group_xdays', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('group', models.ForeignKey(orm['rainman.group'], null=False)),
('day', models.ForeignKey(orm['rainman.day'], null=False))
))
db.create_unique('rainman_group_xdays', ['group_id', 'day_id'])
def backwards(self, orm):
# Removing M2M table for field xdays on 'Group'
db.delete_table('rainman_group_xdays')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'rainman.controller': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'Controller'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'max_on': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'controllers'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'rainman.day': {
'Meta': {'object_name': 'Day'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'rainman.daytime': {
'Meta': {'unique_together': "(('day', 'descr'),)", 'object_name': 'DayTime'},
'day': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'times'", 'to': "orm['rainman.Day']"}),
'descr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rainman.environmenteffect': {
'Meta': {'object_name': 'EnvironmentEffect'},
'factor': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'environment_effects'", 'to': "orm['rainman.ParamGroup']"}),
'sun': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'temp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'wind': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'rainman.feed': {
'Meta': {'object_name': 'Feed'},
'db_max_flow_wait': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300', 'db_column': "'max_flow_wait'"}),
'flow': ('django.db.models.fields.FloatField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feed_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'rainman.group': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'Group'},
'adj_rain': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'adj_sun': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'adj_temp': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'adj_wind': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'days': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups_y'", 'blank': 'True', 'to': "orm['rainman.Day']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['rainman.Site']"}),
'valves': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['rainman.Valve']"}),
'xdays': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups_n'", 'blank': 'True', 'to': "orm['rainman.Day']"})
},
'rainman.groupadjust': {
'Meta': {'unique_together': "(('group', 'start'),)", 'object_name': 'GroupAdjust'},
'factor': ('django.db.models.fields.FloatField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adjusters'", 'to': "orm['rainman.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'rainman.groupoverride': {
'Meta': {'unique_together': "(('group', 'name'), ('group', 'start'))", 'object_name': 'GroupOverride'},
'allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'db_duration': ('django.db.models.fields.PositiveIntegerField', [], {'db_column': "'duration'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'overrides'", 'to': "orm['rainman.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'off_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'on_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'rainman.history': {
'Meta': {'unique_together': "(('site', 'time'),)", 'object_name': 'History'},
'feed': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rain': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'history'", 'to': "orm['rainman.Site']"}),
'sun': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'temp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'wind': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'rainman.level': {
'Meta': {'unique_together': "(('valve', 'time'),)", 'object_name': 'Level'},
'flow': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.FloatField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'levels'", 'to': "orm['rainman.Valve']"})
},
'rainman.log': {
'Meta': {'object_name': 'Log'},
'controller': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'logs'", 'null': 'True', 'to': "orm['rainman.Controller']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['rainman.Site']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 22, 0, 0)', 'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'logs'", 'null': 'True', 'to': "orm['rainman.Valve']"})
},
'rainman.paramgroup': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'ParamGroup'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'factor': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'rain': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'param_groups'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'rainman.rainmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'RainMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rain_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
},
'rainman.schedule': {
'Meta': {'unique_together': "(('valve', 'start'),)", 'object_name': 'Schedule'},
'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'max_length': '1'}),
'db_duration': ('django.db.models.fields.PositiveIntegerField', [], {'db_column': "'duration'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seen': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'max_length': '1'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'schedules'", 'to': "orm['rainman.Valve']"})
},
'rainman.site': {
'Meta': {'object_name': 'Site'},
'db_rain_delay': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300', 'db_column': "'rain_delay'"}),
'db_rate': ('django.db.models.fields.FloatField', [], {'default': '2', 'db_column': "'rate'"}),
'host': ('django.db.models.fields.CharField', [], {'default': "'localhost'", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50005'}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
},
'rainman.sunmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'SunMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sun_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
},
'rainman.tempmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'TempMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'temp_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
},
'rainman.userforgroup': {
'Meta': {'object_name': 'UserForGroup'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['rainman.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'rainman.valve': {
'Meta': {'unique_together': "(('controller', 'name'),)", 'object_name': 'Valve'},
'area': ('django.db.models.fields.FloatField', [], {}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'controller': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'valves'", 'to': "orm['rainman.Controller']"}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'valves'", 'to': "orm['rainman.Feed']"}),
'flow': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'max_level': ('django.db.models.fields.FloatField', [], {'default': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'param_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'valves'", 'to': "orm['rainman.ParamGroup']"}),
'priority': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'runoff': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'shade': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'start_level': ('django.db.models.fields.FloatField', [], {'default': '8'}),
'stop_level': ('django.db.models.fields.FloatField', [], {'default': '3'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'verbose': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'rainman.valveoverride': {
'Meta': {'unique_together': "(('valve', 'name'), ('valve', 'start'))", 'object_name': 'ValveOverride'},
'db_duration': ('django.db.models.fields.PositiveIntegerField', [], {'db_column': "'duration'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'off_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'on_level': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'valve': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'overrides'", 'to': "orm['rainman.Valve']"})
},
'rainman.windmeter': {
'Meta': {'unique_together': "(('site', 'name'),)", 'object_name': 'WindMeter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'wind_meters'", 'to': "orm['rainman.Site']"}),
'var': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'weight': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '10'})
}
}
complete_apps = ['rainman']
|
gpl-3.0
| 4,006,488,282,154,659,000
| 77.402299
| 182
| 0.537973
| false
| 3.655887
| false
| false
| false
|
rwl/PyCIM
|
CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/DynamicsMetaBlockParameterReference.py
|
1
|
4926
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Dynamics.IEC61970.Core.CoreIdentifiedObject import CoreIdentifiedObject
class DynamicsMetaBlockParameterReference(CoreIdentifiedObject):
def __init__(self, StandardControlBlock_MetaBlockConnectable=None, MetaBlockConnectable=None, MemberOf_MetaBlockReference=None, *args, **kw_args):
"""Initialises a new 'DynamicsMetaBlockParameterReference' instance.
@param StandardControlBlock_MetaBlockConnectable:
@param MetaBlockConnectable:
@param MemberOf_MetaBlockReference:
"""
self._StandardControlBlock_MetaBlockConnectable = None
self.StandardControlBlock_MetaBlockConnectable = StandardControlBlock_MetaBlockConnectable
self._MetaBlockConnectable = None
self.MetaBlockConnectable = MetaBlockConnectable
self._MemberOf_MetaBlockReference = None
self.MemberOf_MetaBlockReference = MemberOf_MetaBlockReference
super(DynamicsMetaBlockParameterReference, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["StandardControlBlock_MetaBlockConnectable", "MetaBlockConnectable", "MemberOf_MetaBlockReference"]
_many_refs = []
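# (editor's note) each reference below points at the 'one' side of a
# one-to-many association; the setters keep the inverse list of parameter
# references on the target object consistent when the reference changes.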
def getStandardControlBlock_MetaBlockConnectable(self):
return self._StandardControlBlock_MetaBlockConnectable
def setStandardControlBlock_MetaBlockConnectable(self, value):
if self._StandardControlBlock_MetaBlockConnectable is not None:
filtered = [x for x in self.StandardControlBlock_MetaBlockConnectable.StandardControlBlock_MetaBlockParameterReference if x != self]
self._StandardControlBlock_MetaBlockConnectable._StandardControlBlock_MetaBlockParameterReference = filtered
self._StandardControlBlock_MetaBlockConnectable = value
if self._StandardControlBlock_MetaBlockConnectable is not None:
if self not in self._StandardControlBlock_MetaBlockConnectable._StandardControlBlock_MetaBlockParameterReference:
self._StandardControlBlock_MetaBlockConnectable._StandardControlBlock_MetaBlockParameterReference.append(self)
StandardControlBlock_MetaBlockConnectable = property(getStandardControlBlock_MetaBlockConnectable, setStandardControlBlock_MetaBlockConnectable)
def getMetaBlockConnectable(self):
return self._MetaBlockConnectable
def setMetaBlockConnectable(self, value):
if self._MetaBlockConnectable is not None:
filtered = [x for x in self.MetaBlockConnectable.MetaBlockParameterReference if x != self]
self._MetaBlockConnectable._MetaBlockParameterReference = filtered
self._MetaBlockConnectable = value
if self._MetaBlockConnectable is not None:
if self not in self._MetaBlockConnectable._MetaBlockParameterReference:
self._MetaBlockConnectable._MetaBlockParameterReference.append(self)
MetaBlockConnectable = property(getMetaBlockConnectable, setMetaBlockConnectable)
def getMemberOf_MetaBlockReference(self):
return self._MemberOf_MetaBlockReference
def setMemberOf_MetaBlockReference(self, value):
if self._MemberOf_MetaBlockReference is not None:
filtered = [x for x in self.MemberOf_MetaBlockReference.MetaBlockParameterReference if x != self]
self._MemberOf_MetaBlockReference._MetaBlockParameterReference = filtered
self._MemberOf_MetaBlockReference = value
if self._MemberOf_MetaBlockReference is not None:
if self not in self._MemberOf_MetaBlockReference._MetaBlockParameterReference:
self._MemberOf_MetaBlockReference._MetaBlockParameterReference.append(self)
MemberOf_MetaBlockReference = property(getMemberOf_MetaBlockReference, setMemberOf_MetaBlockReference)
|
mit
| -4,645,534,073,048,918,000
| 49.783505
| 150
| 0.75944
| false
| 4.478182
| false
| false
| false
|
jvce92/web-tdd
|
lists/tests/testView.py
|
1
|
4453
|
from django.test import TestCase
from django.core.urlresolvers import resolve
from lists.views import homePage
from django.http import HttpRequest
from django.template.loader import render_to_string
from lists.models import Item, List
from django.utils.html import escape
# Create your tests here.
#
# class smokeTest(TestCase):
#
# def testBadMath(self):
# self.assertEqual(1+1,3)
class homePageTest(TestCase):
def testRootUrlResolvesToHomePage(self):
found = resolve('/')
self.assertEqual(found.func,homePage)
# def testHomePageReturnsHtml(self):
# request = HttpRequest()
# response = homePage(request)
# expectedHtml = render_to_string('home.html')
# self.assertEqual(expectedHtml,response.content.decode())
class ListViewTest(TestCase):
def testUsesListTemplate(self):
myList = List.objects.create()
response = self.client.get('/lists/%d/' % (myList.id,))
self.assertTemplateUsed(response, 'list.html')
def testDisplaysOnlyItemsForThatList(self):
correctList = List.objects.create()
Item.objects.create(text='item1', list = correctList)
Item.objects.create(text='item2', list = correctList)
wrongList = List.objects.create()
Item.objects.create(text='otherItem1', list = wrongList)
Item.objects.create(text='otherItem2', list = wrongList)
response = self.client.get('/lists/%d/' % (correctList.id, ))
self.assertContains(response, 'item1')
self.assertContains(response, 'item2')
self.assertNotContains(response, 'otherItem1')
self.assertNotContains(response, 'otherItem2')
def testDisplayAllItems(self):
myList = List.objects.create()
Item.objects.create(text='item1', list = myList)
Item.objects.create(text='item2', list = myList)
response = self.client.get('/lists/%d/' % (myList.id, ))
self.assertContains(response, 'item1')
self.assertContains(response, 'item2')
def testUseDifferentTemplates(self):
myList = List.objects.create()
response = self.client.get('/lists/%d/' % (myList.id, ))
self.assertTemplateUsed(response,'list.html')
class NewListTest(TestCase):
def testHandlePostRequest(self):
self.client.post(
'/lists/new', data = {'item_text':'New item', }
)
self.assertEqual(Item.objects.count(),1)
newItem = Item.objects.first()
self.assertEqual(newItem.text,'New item')
def testRedirectsAfterPost(self):
response = self.client.post(
'/lists/new', data = {'item_text': 'New item', }
)
newList = List.objects.first()
self.assertRedirects(response, '/lists/%d/' % (newList.id, ))
def testCanSavePostToAnExistingList(self):
wrongList = List.objects.create()
correctList = List.objects.create()
self.client.post(
'/lists/%d/' % (correctList.id,), data = {'item_text':'New item for existing list'}
)
self.assertEqual(Item.objects.count(),1)
newItem = Item.objects.first()
self.assertEqual(newItem.text,'New item for existing list')
self.assertEqual(newItem.list,correctList)
def testRedirectsToListView(self):
wrongList = List.objects.create()
correctList = List.objects.create()
response = self.client.post(
'/lists/%d/' % (correctList.id,), data = {'item_text':'New item for existing list'}
)
self.assertRedirects(response,'/lists/%d/' % (correctList.id, ))
def testPassesCorrectListToTemplate(self):
wrongList = List.objects.create()
correctList = List.objects.create()
response = self.client.get(
'/lists/%d/' % (correctList.id, ),
)
self.assertEqual(response.context['list'],correctList)
def testValidationErrorsAreSentToHomePageTemplate(self):
response = self.client.post('/lists/new', data={'item_text':''})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
expectedError = escape("You can't have an empty list item")
self.assertContains(response, expectedError)
def testEmptyItemsAreNotSaved(self):
response = self.client.post('/lists/new', data={'item_text':''})
self.assertEqual(List.objects.count(),0)
self.assertEqual(Item.objects.count(),0)
|
gpl-3.0
| 5,634,385,062,296,922,000
| 33.789063
| 91
| 0.650573
| false
| 3.86881
| true
| false
| false
|
radical-cybertools/radical.repex
|
old/misc/experimental_async/async_with_pipeline_suspend.py
|
1
|
9815
|
#!/usr/bin/env python
import radical.utils as ru
#import radical.analytics as ra
import radical.entk as re
from radical.entk import Pipeline, Stage, Task, AppManager
import os
import tarfile
import writeInputs
import time
import git
#import replica
#os.environ['RADICAL_SAGA_VERBOSE'] = 'INFO'
os.environ['RADICAL_ENTK_VERBOSE'] = 'INFO'
os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'
os.environ['SAGA_PTY_SSH_TIMEOUT'] = '2000'
#os.environ['RADICAL_VERBOSE'] = 'INFO'
"""
Every instance of the Replica object instantiates a pipeline for itself. Once the pipeline is created, an MD task is carried out.
At the end of this MD task/stage, every replica transitions into a wait state, all the while looking for other replicas that are
also waiting. The replicas that are waiting are written to a list that has a maximum size limit. As soon as this limit is reached,
the replicas on the list begin to exchange and the list is emptied. The list can then be populated by new replicas finishing their
MD stages. Termination criterion: ALL replicas have performed at least N exchange attempts (i.e. "cycles" specified by the user).
There are 3 data structures maintained here:
1) List of replicas that have completed MD and are awaiting exchange.
2) Array containing the number of times each replica has exchanged.
3) Dictionary containing locations of all replica sandboxes.
"""
replicas = 4
replica_cores = 1
min_temp = 100
max_temp = 200
timesteps = 1000
basename = 'ace-ala'
cycle = 1
md_executable = '/home/scm177/mantel/AMBER/amber14/bin/sander'
SYNCHRONICITY = 0.5
wait_ratio = 0
max_waiting_list = 2
waiting_replicas = []
min_completed_cycles = 3
replica_cycles = [0]*replicas
wait_count = 0
def setup_replicas(replicas, min_temp, max_temp, timesteps, basename):
writeInputs.writeInputs(max_temp=max_temp, min_temp=min_temp, replicas=replicas, timesteps=timesteps, basename=basename)
tar = tarfile.open("input_files.tar", "w")
for name in [basename + ".prmtop", basename + ".inpcrd", basename + ".mdin"]:
tar.add(name)
for r in range(replicas):
tar.add('mdin-{replica}-{cycle}'.format(replica=r, cycle=0))
tar.close()
for r in range(replicas):
os.remove('mdin-{replica}-{cycle}'.format(replica=r, cycle=0))
setup_p = Pipeline()
setup_p.name = 'untarPipe'
repo = git.Repo('.', search_parent_directories=True)
aux_function_path = repo.working_tree_dir
untar_stg = Stage()
untar_stg.name = 'untarStg'
#Untar Task
untar_tsk = Task()
untar_tsk.name = 'untarTsk'
untar_tsk.executable = ['python']
untar_tsk.upload_input_data = ['untar_input_files.py', 'input_files.tar']
untar_tsk.arguments = ['untar_input_files.py', 'input_files.tar']
untar_tsk.cpu_reqs = 1
untar_tsk.post_exec = []
untar_stg.add_tasks(untar_tsk)
setup_p.add_stages(untar_stg)
global replica_sandbox
replica_sandbox='$Pipeline_%s_Stage_%s_Task_%s'%(setup_p.name, untar_stg.name, untar_tsk.name)
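# (editor's note) this '$Pipeline_*_Stage_*_Task_*' string is the EnTK
# placeholder for the untar task's sandbox; later tasks link their input
# files from this location.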
return setup_p
####_----------------------------------------------------------init replicas
class Replica(object):
def __init__(self):
self.state_history = []
def replica_pipeline(self, rid, cycle, replica_cores, md_executable, timesteps, replica_sandbox):
def add_ex_stg(rid, cycle):
#ex stg here
ex_tsk = Task()
ex_stg = Stage()
ex_tsk.name = 'extsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle)
for wid in waiting_replicas: # iterate waiting replica ids (do not shadow the outer rid)
ex_tsk.link_input_data += ['%s/mdinfo-{replica}-{cycle}'.format(replica=wid, cycle=cycle)%replica_sandbox]
ex_tsk.arguments = ['t_ex_gibbs_async.py', len(waiting_replicas)] #This needs to be fixed
ex_tsk.executable = ['python']
ex_tsk.cpu_reqs = {
'processes': 1,
'process_type': '',
'threads_per_process': 1,
'thread_type': None
}
ex_tsk.pre_exec = ['export dummy_variable=19']
ex_stg.add_tasks(ex_tsk)
return ex_stg
def add_md_stg(rid,cycle):
#md stg h
md_tsk = Task()
md_stg = Stage()
md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle)
md_tsk.link_input_data += ['%s/inpcrd' %replica_sandbox,
'%s/prmtop' %replica_sandbox,
'%s/mdin-{replica}-{cycle}'.format(replica=rid, cycle=0) %replica_sandbox]
md_tsk.arguments = ['-O',
'-i', 'mdin-{replica}-{cycle}'.format(replica=rid, cycle=0),
'-p', 'prmtop',
'-c', 'inpcrd',
'-o', 'out',
'-r', '%s/restrt-{replica}-{cycle}'.format(replica=rid, cycle=cycle) %replica_sandbox,
'-x', 'mdcrd',
'-inf', '%s/mdinfo-{replica}-{cycle}'.format(replica=rid, cycle=cycle) %replica_sandbox]
md_tsk.executable = ['/home/scm177/mantel/AMBER/amber14/bin/sander']
md_tsk.cpu_reqs = {
'processes': replica_cores,
'process_type': '',
'threads_per_process': 1,
'thread_type': None
}
md_tsk.pre_exec = ['export dummy_variable=19', 'echo $SHARED']
md_stg.add_tasks(md_tsk)
md_stg.post_exec = {
'condition': md_post,
'on_true': suspend,
'on_false': exchange_stg
}
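# (editor's note) md_post, suspend and exchange_stg are referenced above but
# never defined in this experimental script; EnTK expects callables for the
# 'condition', 'on_true' and 'on_false' keys of post_exec.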
return md_stg
def synchronicity_function():
"""
synchronicity function should evaluate the following:
1) Has the replica in THIS pipeline completed enough cycles?
2) If yes, Is the replica threshold met? I.e. is the exchange list large enough?
3) If no, add to waiting list
4) Is the replica in THIS pipeline the LOWEST rid in the list?
If 1 and 2 return True, the synchronicity function returns True.
If 1 is true and 2 is false, the synchronicity function returns False.
EXTREMELY IMPORTANT: Remember to clear replica related variables, replica lists etc., after the adaptivity
operations have completed! i.e. after the propagate_cycle() function. Not necessary to clear after end_func().
"""
global replica_cycles
global ex_pipeline
global max_waiting_list
global min_completed_cycles
print replica_cycles, rid
replica_cycles[rid] += 1
print replica_cycles
if min(replica_cycles) < min_completed_cycles:
waiting_replicas.append(rid)
if len(waiting_replicas) < max_waiting_list:
p_replica.suspend()
#p_replica.resume() # There seems to be an issue here. We potentially need the "resume" function to be triggered
# by a different pipeline.
ex_pipeline = min(waiting_replicas)
print "Synchronicity Function returns True"
return True
return False
def propagate_cycle():
"""
This function adds two stages to the pipeline: an exchange stage and an MD stage.
If the pipeline is not the "ex_pipeline", it stalls and adds only the MD stage until the EX pipeline has completed
the EX task.
"""
if rid == ex_pipeline: ### FIX THIS TO REFER TO THE CORRECT NAME OF THE EX PIPELINE
# This adds an Ex task.
ex_stg = add_ex_stg(rid, cycle)
p_replica.add_stages(ex_stg)
# And the next MD stage
md_stg = add_md_stg(rid, cycle)
p_replica.add_stages(md_stg)
else:
while ex_stg.state != "COMPLETED": ### FIX THIS TO REFER TO THE CORRECT NAME OF THE EX STAGE
#time.sleep(1)
pass
md_stg = add_md_stg(rid, cycle)
p_replica.add_stages(md_stg)
waiting_replicas[:] = [] # EMPTY REPLICA WAITING LIST (in place; plain assignment would only rebind a local)
def end_func():
print "DONE"
p_replica = Pipeline()
p_replica.name = 'p_{rid}'.format(rid=rid)
md_stg = add_md_stg(rid, cycle)
p_replica.add_stages(md_stg)
return p_replica
system = setup_replicas(replicas, min_temp, max_temp, timesteps, basename)
replica=[]
replica_pipelines = []
for rid in range(replicas):
print rid
replica = Replica()
r_pipeline = replica.replica_pipeline(rid, cycle, replica_cores, md_executable, timesteps, replica_sandbox)
replica_pipelines.append(r_pipeline)
os.environ['RADICAL_PILOT_DBURL'] = "mongodb://smush:key1209@ds147361.mlab.com:47361/db_repex_4"
res_dict ={
"resource" : 'local.localhost',
"walltime" : 30,
"cpus" : 4,
}
appman = AppManager(autoterminate=False, port=32769)
appman.resource_desc = res_dict
appman.workflow = set([system])
appman.run()
appman.workflow = set(replica_pipelines)
appman.run()
appman.resource_terminate()
|
mit
| -1,247,269,295,089,884,200
| 34.690909
| 130
| 0.563423
| false
| 3.856582
| false
| false
| false
|
dextervip/rpv
|
GerenDisponibilidade/setup.py
|
1
|
1613
|
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
import os
VERSION = '0.3'
# Make the data files go to the right place.
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
template_dir = "registration_defaults/templates/registration"
templates = [os.path.join(template_dir, f) for f in os.listdir(template_dir)]
setup(
name='django-registration-defaults',
version=VERSION,
description="Default templates and settings for James Bennett's"
"django-registration application.",
long_description="This module provides a full set of default templates"
" and settings for ``django-registration`` to ease the process of"
" creating Django apps that require user registration. It depends"
" on ``django-registration``.",
author="Charlie DeTar",
author_email="cfd@media.mit.edu",
url="http://github.com/yourcelf/django-registration-defaults",
license="MIT License",
platforms=["any"],
packages=['registration_defaults'],
package_data={'registration_defaults': templates},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Django",
],
include_package_data=True,
)
|
gpl-3.0
| -2,980,506,392,158,818,300
| 37.404762
| 104
| 0.688779
| false
| 3.982716
| false
| false
| false
|
ella/mypage
|
mypage/pages/migrations/0008_dupe_sites.py
|
1
|
3621
|
from south.db import db
from django.db import models
from django.db.models import F
from mypage.pages.models import *
import datetime
class Migration:
def forwards(self, orm):
'''
if not db.dry_run:
orm.UserPage.objects.update(site_copy=F('site'))
orm.SessionPage.objects.update(site_copy=F('site'))
'''
db.execute('''
update pages_userpage p JOIN pages_page ON p.page_ptr_id = pages_page.id set p.site_copy_id=site_id;
''')
db.execute('''
update pages_sessionpage p JOIN pages_page ON p.page_ptr_id = pages_page.id set p.site_copy_id=site_id;
''')
def backwards(self, orm):
pass
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'db_table': "'django_site'"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.widgetinpage': {
'Meta': {'unique_together': "(('page','widget',),)"},
'config_json': ('models.TextField', [], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'page': ('models.ForeignKey', ["orm['pages.Page']"], {'verbose_name': "_('Page')"}),
'rendered_widget': ('models.ForeignKey', ["orm['widgets.RenderedWidget']"], {'null': 'False'}),
'state': ('models.SmallIntegerField', [], {'default': '2'}),
'widget': ('models.ForeignKey', ["orm['widgets.Widget']"], {'verbose_name': "_('Widget')"})
},
'auth.user': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'widgets.widget': {
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.page': {
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'layout_json': ('models.TextField', [], {}),
'site': ('models.ForeignKey', ["orm['sites.Site']"], {'default': ' lambda :settings.SITE_ID'}),
'skin': ('models.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'template': ('models.CharField', [], {'default': "'page.html'", 'max_length': '100'}),
'widgets': ('models.ManyToManyField', ["orm['widgets.Widget']"], {'through': "'WidgetInPage'"})
},
'widgets.renderedwidget': {
'Meta': {'unique_together': "(('widget','state','site',),)"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'pages.userpage': {
'Meta': {'_bases': ['mypage.pages.models.Page']},
'page_ptr': ('models.OneToOneField', ["orm['pages.Page']"], {}),
'site_copy': ('models.ForeignKey', ["orm['sites.Site']"], {'default': ' lambda :settings.SITE_ID', 'null': 'True', 'blank': 'True'}),
'user': ('models.ForeignKey', ["orm['auth.User']"], {'db_index': 'True'})
},
'pages.sessionpage': {
'Meta': {'_bases': ['mypage.pages.models.Page']},
'page_ptr': ('models.OneToOneField', ["orm['pages.Page']"], {}),
'session_key': ('models.CharField', ["_('session key')"], {'max_length': '40', 'db_index': 'True'}),
'site_copy': ('models.ForeignKey', ["orm['sites.Site']"], {'default': ' lambda :settings.SITE_ID', 'null': 'True', 'blank': 'True'}),
'updated': ('models.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'False'})
}
}
complete_apps = ['pages']
|
bsd-3-clause
| -4,512,450,370,742,625,300
| 45.423077
| 145
| 0.495996
| false
| 3.803571
| false
| false
| false
|
cupy/cupy
|
tests/cupyx_tests/scipy_tests/linalg_tests/test_decomp_lu.py
|
1
|
4936
|
import unittest
import warnings
import numpy
import cupy
from cupy import testing
import cupyx.scipy.linalg
if cupyx.scipy._scipy_available:
import scipy.linalg
@testing.gpu
@testing.parameterize(*testing.product({
'shape': [(1, 1), (2, 2), (3, 3), (5, 5), (1, 5), (5, 1), (2, 5), (5, 2)],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestLUFactor(unittest.TestCase):
@testing.for_dtypes('fdFD')
def test_lu_factor(self, dtype):
if self.shape[0] != self.shape[1]:
# skip non-square tests since scipy.lu_factor requires square
raise unittest.SkipTest() # returning the exception object would not skip the test
a_cpu = testing.shaped_random(self.shape, numpy, dtype=dtype)
a_gpu = cupy.asarray(a_cpu)
result_cpu = scipy.linalg.lu_factor(a_cpu)
result_gpu = cupyx.scipy.linalg.lu_factor(a_gpu)
assert len(result_cpu) == len(result_gpu)
assert result_cpu[0].dtype == result_gpu[0].dtype
assert result_cpu[1].dtype == result_gpu[1].dtype
cupy.testing.assert_allclose(result_cpu[0], result_gpu[0], atol=1e-5)
cupy.testing.assert_array_equal(result_cpu[1], result_gpu[1])
def check_lu_factor_reconstruction(self, A):
m, n = self.shape
lu, piv = cupyx.scipy.linalg.lu_factor(A)
# extract ``L`` and ``U`` from ``lu``
L = cupy.tril(lu, k=-1)
cupy.fill_diagonal(L, 1.)
L = L[:, :m]
U = cupy.triu(lu)
U = U[:n, :]
# check output shapes
assert lu.shape == (m, n)
assert L.shape == (m, min(m, n))
assert U.shape == (min(m, n), n)
assert piv.shape == (min(m, n),)
# apply pivot (on CPU since slaswp is not available in cupy)
piv = cupy.asnumpy(piv)
rows = numpy.arange(m)
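# (editor's note) piv follows the LAPACK ipiv convention: during the
# factorization, row i of A was interchanged with row piv[i].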
for i, row in enumerate(piv):
if i != row:
rows[i], rows[row] = rows[row], rows[i]
PA = A[rows]
# check that reconstruction is close to original
LU = L.dot(U)
cupy.testing.assert_allclose(LU, PA, atol=1e-5)
@testing.for_dtypes('fdFD')
def test_lu_factor_reconstruction(self, dtype):
A = testing.shaped_random(self.shape, cupy, dtype=dtype)
self.check_lu_factor_reconstruction(A)
@testing.for_dtypes('fdFD')
def test_lu_factor_reconstruction_singular(self, dtype):
if self.shape[0] != self.shape[1]:
raise unittest.SkipTest() # returning the exception object would not skip the test
A = testing.shaped_random(self.shape, cupy, dtype=dtype)
A -= A.mean(axis=0, keepdims=True)
A -= A.mean(axis=1, keepdims=True)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
self.check_lu_factor_reconstruction(A)
@testing.gpu
@testing.parameterize(*testing.product({
'shape': [(1, 1), (2, 2), (3, 3), (5, 5), (1, 5), (5, 1), (2, 5), (5, 2)],
'permute_l': [False, True],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestLU(unittest.TestCase):
@testing.for_dtypes('fdFD')
def test_lu(self, dtype):
a_cpu = testing.shaped_random(self.shape, numpy, dtype=dtype)
a_gpu = cupy.asarray(a_cpu)
result_cpu = scipy.linalg.lu(a_cpu, permute_l=self.permute_l)
result_gpu = cupyx.scipy.linalg.lu(a_gpu, permute_l=self.permute_l)
assert len(result_cpu) == len(result_gpu)
if not self.permute_l:
# check permutation matrix
result_cpu = list(result_cpu)
result_gpu = list(result_gpu)
P_cpu = result_cpu.pop(0)
P_gpu = result_gpu.pop(0)
cupy.testing.assert_array_equal(P_gpu, P_cpu)
cupy.testing.assert_allclose(result_gpu[0], result_cpu[0], atol=1e-5)
cupy.testing.assert_allclose(result_gpu[1], result_cpu[1], atol=1e-5)
@testing.for_dtypes('fdFD')
def test_lu_reconstruction(self, dtype):
m, n = self.shape
A = testing.shaped_random(self.shape, cupy, dtype=dtype)
if self.permute_l:
PL, U = cupyx.scipy.linalg.lu(A, permute_l=self.permute_l)
PLU = PL @ U
else:
P, L, U = cupyx.scipy.linalg.lu(A, permute_l=self.permute_l)
PLU = P @ L @ U
# check that reconstruction is close to original
cupy.testing.assert_allclose(PLU, A, atol=1e-5)
@testing.gpu
@testing.parameterize(*testing.product({
'trans': [0, 1, 2],
'shapes': [((4, 4), (4,)), ((5, 5), (5, 2))],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestLUSolve(unittest.TestCase):
@testing.for_dtypes('fdFD')
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_lu_solve(self, xp, scp, dtype):
a_shape, b_shape = self.shapes
A = testing.shaped_random(a_shape, xp, dtype=dtype)
b = testing.shaped_random(b_shape, xp, dtype=dtype)
lu = scp.linalg.lu_factor(A)
return scp.linalg.lu_solve(lu, b, trans=self.trans)
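# (editor's note) trans follows LAPACK getrs: 0 solves A x = b, 1 solves
# A^T x = b, and 2 solves A^H x = b.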
|
mit
| 3,208,808,118,296,692,700
| 36.112782
| 78
| 0.595421
| false
| 3.031941
| true
| false
| false
|
bailabs/bench-v7
|
bench/commands/update.py
|
1
|
7760
|
import click
import sys, os
from bench.config.common_site_config import get_config
from bench.app import pull_all_apps, is_version_upgrade
from bench.utils import (update_bench, validate_upgrade, pre_upgrade, post_upgrade, before_update,
update_requirements, backup_all_sites, patch_sites, build_assets, restart_supervisor_processes)
from bench import patches
# TODO: Not DRY
@click.command('update')
@click.option('--pull', is_flag=True, help="Pull changes in all the apps in bench")
@click.option('--patch', is_flag=True, help="Run migrations for all sites in the bench")
@click.option('--build', is_flag=True, help="Build JS and CSS artifacts for the bench")
@click.option('--bench', is_flag=True, help="Update bench")
@click.option('--requirements', is_flag=True, help="Update requirements")
@click.option('--restart-supervisor', is_flag=True, help="restart supervisor processes after update")
@click.option('--auto', is_flag=True)
@click.option('--upgrade', is_flag=True, help="Required for major version updates")
@click.option('--no-backup', is_flag=True)
@click.option('--force', is_flag=True)
@click.option('--reset', is_flag=True,
help="Hard resets git branch's to their new states overriding any changes and overriding rebase on pull")
@click.option('--force_frappe', is_flag=True)
def update(pull=False, patch=False, build=False, bench=False, auto=False,
restart_supervisor=False, requirements=False,
no_backup=False, upgrade=False, force=False, reset=False,
force_frappe = False):
"Update bench"
print "force_frape {0}".format(force_frappe)
if not (pull or patch or build or bench or requirements):
pull, patch, build, bench, requirements = True, True, True, True, True
if auto:
sys.exit(1)
patches.run(bench_path='.')
conf = get_config(".")
# if bench and conf.get('update_bench_on_update'):
# update_bench()
# restart_update({
# 'pull': pull,
# 'patch': patch,
# 'build': build,
# 'requirements': requirements,
# 'no-backup': no_backup,
# 'restart-supervisor': restart_supervisor,
# 'upgrade': upgrade,
# 'reset': reset
# })
if conf.get('release_bench'):
print 'Release bench, cannot update'
sys.exit(1)
version_upgrade = is_version_upgrade()
if version_upgrade[0] and not upgrade:
print
print
print "This update will cause a major version change in Frappe/ERPNext from {0} to {1}.".format(
*version_upgrade[1:])
print "This would take significant time to migrate and might break custom apps. Please run `bench update --upgrade` to confirm."
print
print "You can stay on the latest stable release by running `bench switch-to-master` or pin your bench to {0} by running `bench switch-to-v{0}`".format(
version_upgrade[1])
sys.exit(1)
_update(pull, patch, build, bench, auto, restart_supervisor, requirements, no_backup, upgrade, force=force,
reset=reset,force_frappe=force_frappe)
def _update(pull=False, patch=False, build=False, update_bench=False, auto=False, restart_supervisor=False,
requirements=False, no_backup=False, upgrade=False, bench_path='.', force=False, reset=False,
force_frappe=False):
conf = get_config(bench_path=bench_path)
version_upgrade = is_version_upgrade(bench_path=bench_path)
# if version_upgrade[0] and not upgrade:
# raise Exception("Major Version Upgrade")
#
# if upgrade and (version_upgrade[0] or (not version_upgrade[0] and force)):
# validate_upgrade(version_upgrade[1], version_upgrade[2], bench_path=bench_path)
before_update(bench_path=bench_path, requirements=requirements)
if pull:
pull_all_apps(bench_path=bench_path, reset=reset,force_frappe=force_frappe)
# if requirements:
# update_requirements(bench_path=bench_path)
if upgrade and (version_upgrade[0] or (not version_upgrade[0] and force)):
pre_upgrade(version_upgrade[1], version_upgrade[2], bench_path=bench_path)
import bench.utils, bench.app
reload(bench.utils)
reload(bench.app)
if patch:
if not no_backup:
backup_all_sites(bench_path=bench_path)
patch_sites(bench_path=bench_path)
if build:
build_assets(bench_path=bench_path)
if upgrade and (version_upgrade[0] or (not version_upgrade[0] and force)):
post_upgrade(version_upgrade[1], version_upgrade[2], bench_path=bench_path)
if restart_supervisor or conf.get('restart_supervisor_on_update'):
restart_supervisor_processes(bench_path=bench_path)
print "_" * 80
print "Bench: Open source installer + admin for Frappe and ERPNext (https://erpnext.com)"
print
@click.command('retry-upgrade')
@click.option('--version', default=5)
def retry_upgrade(version):
pull_all_apps()
patch_sites()
build_assets()
post_upgrade(version - 1, version)
def restart_update(kwargs):
args = ['--' + k for k, v in kwargs.items() if v]
os.execv(sys.argv[0], sys.argv[:2] + args)
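# e.g. restart_update({'pull': True, 'patch': False}) re-execs the running
# process with only the truthy flags, here as `bench update --pull`.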
@click.command('switch-to-branch')
@click.argument('branch')
@click.argument('apps', nargs=-1)
@click.option('--upgrade', is_flag=True)
def switch_to_branch(branch, apps, upgrade=False,force_frappe=False):
"Switch all apps to specified branch, or specify apps separated by space"
from bench.app import switch_to_branch
switch_to_branch(branch=branch, apps=list(apps), upgrade=upgrade)
print 'Switched to ' + branch
print 'Please run `bench update --patch` to be safe from any differences in database schema'
@click.command('switch-to-master')
@click.option('--upgrade', is_flag=True)
@click.option('--force_frappe', is_flag=True)
def switch_to_master(upgrade=False,force_frappe=False):
"Switch frappe and erpnext to master branch"
from bench.app import switch_to_master
switch_to_master(upgrade=upgrade, apps=['frappe', 'erpnext'],force_frappe=force_frappe)
print
print 'Switched to master'
print 'Please run `bench update --patch` to be safe from any differences in database schema'
@click.command('switch-to-develop')
@click.option('--upgrade', is_flag=True)
def switch_to_develop(upgrade=False):
"Switch frappe and erpnext to develop branch"
from bench.app import switch_to_develop
switch_to_develop(upgrade=upgrade, apps=['frappe', 'erpnext'])
print
print 'Switched to develop'
print 'Please run `bench update --patch` to be safe from any differences in database schema'
@click.command('switch-to-v4')
@click.option('--upgrade', is_flag=True)
def switch_to_v4(upgrade=False):
"Switch frappe and erpnext to v4 branch"
from bench.app import switch_to_v4
switch_to_v4(upgrade=upgrade)
print
print 'Switched to v4'
print 'Please run `bench update --patch` to be safe from any differences in database schema'
@click.command('switch-to-v5')
@click.option('--upgrade', is_flag=True)
def switch_to_v5(upgrade=False):
"Switch frappe and erpnext to v5 branch"
from bench.app import switch_to_v5
switch_to_v5(upgrade=upgrade)
print
print 'Switched to v5'
print 'Please run `bench update --patch` to be safe from any differences in database schema'
@click.command('switch-to-v7')
@click.option('--upgrade', is_flag=True)
def switch_to_v7(upgrade=False):
"Switch frappe and erpnext to v7 branch"
from bench.app import switch_to_v7
switch_to_v7(upgrade=upgrade)
print
print 'Switched to v7'
print 'Please run `bench update --patch` to be safe from any differences in database schema'
|
gpl-3.0
| 4,466,500,918,880,796,700
| 38.794872
| 160
| 0.677835
| false
| 3.579336
| false
| false
| false
|
Maleus/Enumerator
|
enumerator/lib/generic_service.py
|
1
|
2366
|
#!/usr/bin/env python
"""GenericService encapsulates any
methods which are common across all
service modules.
@author: Steve Coward (steve<at>sugarstack.io)
@version 1.0
"""
import os
import re
class GenericService(object):
static_path = '%s/../static' % os.path.dirname(os.path.realpath(__file__))
compiled_service_definition = None
def __init__(self):
self.compiled_service_definition = self.compile_service_definition(
self.SERVICE_DEFINITION)
def compile_service_definition(self, definition):
"""Take a string of key:values and parse
the values into a python interpretable
conditional statement.
@param definition: String used to classify
a service.
"""
rule_parser_pattern = re.compile('([^\s]+\s)?(\w+):([^\s]+)')
rule = []
for rule_set in rule_parser_pattern.findall(definition):
conditional, key, values = map(str.strip, rule_set)
# Determine if values need to be split apart.
# Rule: If there are no '-' values at the beginning of each value, we can use a list.
# Rule: If there are '-' values at the beginning of each value,
# split apart.
if len([val for val in values.split(',') if val[0] == '-']):
values_set = values.split(',')
for value in values_set:
if value[0] == '-':
rule.append('"%s" not in %s' % (value[1:], key))
else:
rule.append('"%s" in %s' % (value, key))
else:
values_set = values.split(',')
rule.append('%s %s in %s' % (conditional, key, values_set))
return ' and '.join(rule).replace('and or', 'or')
def is_valid_service(self, attributes):
"""Returns True or False if the attributes
of a service record match the definition of
a service.
@param attributes: Dict value of a scanned service
(service,port,state).
"""
service = attributes.get('service')
port = attributes.get('port')
state = attributes.get('state')
if state != 'open':
return False
# The keys in rule will map to service, port and status set above.
return eval(self.compiled_service_definition)
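# --- Editor's sketch (not part of the original module): illustrative usage,
# assuming a hypothetical subclass that supplies SERVICE_DEFINITION in the
# key:value syntax parsed by compile_service_definition above.
if __name__ == '__main__':
    class HttpService(GenericService):
        SERVICE_DEFINITION = 'service:http,https'
    svc = HttpService()
    print(svc.compiled_service_definition)   # e.g. " service in ['http', 'https']"
    print(svc.is_valid_service({'service': 'http', 'port': '80', 'state': 'open'}))  # True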
|
mit
| -7,374,674,132,465,793,000
| 34.313433
| 97
| 0.569315
| false
| 4.217469
| false
| false
| false
|
buguen/pylayers
|
pylayers/util/cone.py
|
1
|
20331
|
#-*- coding:Utf-8 -*-
r"""
Class Cone
==========
The following conventions are adopted
+ A cone has an **apex** which is a point in the plane.
+ A cone has two vectors which define the cone aperture. The order of those two vectors
matters: (u) is the starting vector and (v) the ending vector.
The cone region is defined by the convex angular sector going from starting
vector :math:`\mathbf{u}` to the ending vector :math:`\mathbf{v}`
rotating in the plane in following the trigonometric rotation convention.
The modulus of the cross product between :math:`\mathbf{u}` and :math:`\mathbf{v}` is positive.
:math:`\mathbf{u} \times \mathbf{v} = \alpha \mathbf{z} \;\; \textrm{with} \;\;\alpha > 0`
.. autosummary::
:toctree:
"""
import numpy as np
import doctest
import shapely as shp
import matplotlib.pyplot as plt
import pylayers.util.geomutil as geu
import pylayers.util.plotutil as plu
from pylayers.util.project import *
from matplotlib.path import Path
import matplotlib.patches as patches
import pdb
import logging
class Cone(PyLayers):
def __init__(self, a=np.array([1,0]), b = np.array([0,1]), apex=np.array([0, 0])):
"""
a : np.array (,2)
basis vector
b : np.array (,2)
basis vector
apex : np.array (,2)
cone apex
"""
self.apex = apex
# normalizing cone vectors
an = a/np.sqrt(np.dot(a,a))
bn = b/np.sqrt(np.dot(b,b))
if np.cross(an,bn) > 0:
self.u = an
self.v = bn
else:
self.u = bn
self.v = an
# -1 < gamma < 1
self.dot = np.dot(self.u,self.v)
self.cross = np.cross(self.u,self.v)
if self.cross != 0:
self.degenerated = False
else:
self.degenerated = True
# update cone angle and probability
self.upd_angle()
def __repr__(self):
st = 'Cone object \n'
st = st+'----------------\n'
st = st + "Apex : " + str(self.apex)+'\n'
st = st + "u :" + str(self.u)+'\n'
st = st + "v :" + str(self.v)+'\n'
st = st + "cross : " + str(self.cross)+'\n'
st = st + "dot : " + str(self.dot)+'\n'
st = st + "angle : " + str(self.angle*180/np.pi)+'\n'
st = st + "pcone : " + str(self.pcone)+'\n'
if hasattr(self,'seg0'):
st = st + "from segments ( (xta,xhe) , (yta,yhe) )\n"
st = st + " seg0 : " + str(tuple(self.seg0))+'\n'
st = st + " seg1 : " + str(tuple(self.seg1))+'\n'
return(st)
def upd_angle(self):
"""update cone angle attribute
and associated probability of the Cone object
"""
self.angle = np.arccos(self.dot)
self.pcone = self.angle/(1.0*np.pi)
def belong_seg(self,pta,phe,prob=True,visu=False):
""" test if segment belong to cone
Parameters
----------
pta : np.array (2xNseg)
phe : np.array (2xNseg)
Returns
-------
typ : int
0 : no visibility
1 : full visibility
2 : he.v
3 : ta.v
4 : ta.u
5 : he.u
6 : inside
proba : float
geometric probability
Notes
-----
A segment belongs to the cone if not all termination points
lie in the same side outside the cone.
See Also
--------
outside_point
"""
if visu:
f,a = self.show()
plu.displot(pta,phe,fig=f,ax=a)
plt.show()
vc = (self.u+self.v)/2
#vcn = vc/np.sqrt(np.dot(vc,vc))
w = vc/np.sqrt(np.dot(vc,vc))
w = w.reshape(2,1)
#w = np.array([vcn[1],-vcn[0]])
ptama = pta - self.apex[:,None]
phema = phe - self.apex[:,None]
dtaw = np.sum(ptama*w,axis=0)
dhew = np.sum(phema*w,axis=0)
blta = (dtaw>=0)|(np.isclose(dtaw,0.))
blhe = (dhew>=0)|(np.isclose(dhew,0.))
#if 'seg1' in self.__dict__:
# pa = self.seg1[:,0].reshape(2,1)
# pb = (self.seg1[:,0]+w).reshape(2,1)
#else:
# pa = self.apex.reshape(2,1)
# pb = pa+w.reshape(2,1)
#blta = geu.isleft(pa,pb,pta)
#blhe = geu.isleft(pa,pb,phe)
# segment candidate for being above segment 1 (,Nseg)
boup = blta & blhe
# type of segment
if prob:
proba = np.zeros(np.shape(pta)[1])
else :
proba =[]
typ = np.zeros(np.shape(pta)[1])
# is tail out ? bo1 | bo2
# btaol : boolean tail out left
# btaor : boolean tail out right
# bheol : boolean head out left
# bheor : boolean head out right #
# among upper segment check position wrt cone
#btaol,btaor = self.outside_point(pta)
#bheol,bheor = self.outside_point(phe)
btaor,btaol = self.outside_point(pta)
bheor,bheol = self.outside_point(phe)
# tail and head are they out cone on the same side ?
# if the two termination points are not on the same side of the cone
# --> segment is in.
# boin = (~((btaol&bheol)|(btaor&bheor)))&boup
# full interception (proba to reach = 1)
bfull = ((btaol&bheor)|(btaor&bheol))&boup
if prob :
proba[bfull] = 1
typ[bfull] = 1
#(he-apex).v
btalhein = (btaol & ~bheol & ~bheor)&boup
if (prob and not (btalhein==False).all()):
v2 = phe[:,btalhein]-self.apex.reshape(2,1)
vn2 = v2/np.sqrt(np.sum(v2*v2,axis=0))
vvn2 = np.dot(self.v,vn2)
# paranoid verification of scalar product \in [-1,1]
vvn2 = np.minimum(vvn2,np.ones(len(vvn2)))
vvn2 = np.maximum(vvn2,-np.ones(len(vvn2)))
pr2 = np.arccos(vvn2)/self.angle
proba[btalhein] = pr2
typ[btalhein] = 2
#(ta-apex).v
bheltain = (bheol & ~btaol & ~btaor)&boup
if (prob and not (bheltain==False).all()):
v3 = pta[:,bheltain]-self.apex.reshape(2,1)
vn3 = v3/np.sqrt(np.sum(v3*v3,axis=0))
vvn3 = np.dot(self.v,vn3)
vvn3 = np.minimum(vvn3,np.ones(len(vvn3)))
vvn3 = np.maximum(vvn3,-np.ones(len(vvn3)))
pr3 = np.arccos(vvn3)/self.angle
proba[bheltain] = pr3
typ[bheltain] = 3
#ta.u
bhertain = (bheor & ~btaol & ~btaor)&boup
if (prob and not(bhertain==False).all()):
v4 = pta[:,bhertain]-self.apex.reshape(2,1)
vn4 = v4/np.sqrt(np.sum(v4*v4,axis=0))
vvn4 = np.dot(self.u,vn4)
vvn4 = np.minimum(vvn4,np.ones(len(vvn4)))
vvn4 = np.maximum(vvn4,-np.ones(len(vvn4)))
pr4 = np.arccos(vvn4)/self.angle
proba[bhertain] = pr4
typ[bhertain] = 4
#he.u
btarhein = (btaor & ~bheol & ~bheor)&boup
if (prob and not(btarhein==False).all()):
v5 = phe[:,btarhein]-self.apex.reshape(2,1)
vn5 = v5/np.sqrt(np.sum(v5*v5,axis=0))
vvn5 = np.dot(self.u,vn5)
vvn5 = np.minimum(vvn5,np.ones(len(vvn5)))
vvn5 = np.maximum(vvn5,-np.ones(len(vvn5)))
pr5 = np.arccos(vvn5)/self.angle
proba[btarhein] = pr5
typ[btarhein] = 5
#ta.he
btainhein = (~btaol & ~btaor & ~bheol & ~bheor)&boup
if (prob and not (btainhein==0).all()):
va = pta[:,btainhein]-self.apex.reshape(2,1)
vb = phe[:,btainhein]-self.apex.reshape(2,1)
vna = va/np.sqrt(np.sum(va*va,axis=0))
vnb = vb/np.sqrt(np.sum(vb*vb,axis=0))
# dot product vna,vnb
vnab = np.sum(vna*vnb,axis=0)
vnab = np.minimum(vnab,np.ones(len(vnab)))
vnab = np.maximum(vnab,-np.ones(len(vnab)))
pr6 = np.arccos(vnab)/self.angle
proba[btainhein] = pr6
typ[btainhein] = 6
return(typ,proba)
def above_seg(self):
"""
"""
vc = (self.u+self.v)/2
vcn = vc/np.sqrt(np.dot(vc,vc))
w = np.array([vcn[1],-vcn[0]])
self.pa = self.seg1[:,0].reshape(2,1)
self.pb = (self.seg1[:,0]+w).reshape(2,1)
def outside_point(self,p):
""" check if p is outside the cone
Parameters
----------
p : np.array (2xNp)
Returns
-------
~b1 & ~b2 : boolean (outside on the left) (,Np)
b1 & b2 : boolean (outside on the right) (,Np)
Examples
--------
Notes
-----
If one of the two output booleans is True the point is outside
There are 2 output bits but only 3 states due to (uv) convention.
v u
p \ / lv & lu
\/
\p /
\/ ~lv & lu
\ / p
\/ ~lu & ~lv
"""
a = self.apex[:,None]
# b = a + self.u.reshape(2,1)
# c = a + self.v.reshape(2,1)
b = a + self.u[:,None]
c = a + self.v[:,None]
p0a0 = p[0,:]-a[0,:]
p1a1 = p[1,:]-a[1,:]
lu = ((b[0,:]-a[0,:])* p1a1 - ((b[1,:]-a[1,:])* p0a0 ))>0
lv = ((c[0,:]-a[0,:])* p1a1 - ((c[1,:]-a[1,:])* p0a0 ))>0
return(~lu & ~lv , lu & lv)
def belong_point2(self,p):
"""
Parameters
----------
p : np.array (Ndim x Npoints)
"""
a = self.apex[:,np.newaxis]
b = a + self.u.reshape(2,1)
c = a + self.v.reshape(2,1)
p1a1 = p[1,:]-a[1,:]
p0a0 = p[0,:]-a[0,:]
b1 = ((b[0,:]-a[0,:])* p1a1 - ((b[1,:]-a[1,:])* p0a0 ))>0
b2 = ((c[0,:]-a[0,:])* p1a1 - ((c[1,:]-a[1,:])* p0a0 ))>0
return(b1^b2)
def belong_point(self, p):
""" test if p belongs to Cone
Parameters
----------
p : np.array (Ndim x Npoints)
Returns
-------
b : np.array boolean (1xNpoints)
"""
# Ndim x Npoints
if not self.degenerated:
pt = p - self.apex[:,np.newaxis]
#puv = np.sum(self.bv[:,:,np.newaxis]*pt[:,np.newaxis,:],axis=0)
#alpha = puv[0,:]-self.gamma*puv[1,:]
#beta = puv[1,:]-self.gamma*puv[0,:]
pu = np.sum(self.u[:,np.newaxis]*pt,axis=0)
pv = np.sum(self.v[:,np.newaxis]*pt,axis=0)
alpha = pu-self.dot*pv
beta = pv-self.dot*pu
b = (beta>0)&(alpha>0)
else:
a0 = self.seg0[:,0]
b0 = self.seg0[:,1]
if self.u[0] != 0:
slope = self.u[1]/self.u[0]
y0 = a0[1]-slope*a0[0]
y1 = b0[1]-slope*b0[0]
b = (p[1,:] > slope*p[0,:] + min(y0,y1) ) & (p[1,:]<slope*p[0,:]+max(y0,y1) )
else:
b = (p[0,:] > min(a0[0],b0[0]) ) & (p[0,:]< max(a0[0],b0[0]) )
return(b)
def above(self, p):
""" check if above
Parameters
----------
p :
"""
bo1 = self.belong_point(p)
pb = p[:,bo1]
if self.v[1] != 0:
slope1 = self.v[1]/self.v[0]
b1 = self.v[1] - slope1*self.v[0] # note: this is identically 0; an apex-based intercept was likely intended
bo2 = pb[1,:] > slope1*pb[0,:]+b1
else:
bo2 = pb[1,:] > self.seg1[1,0]
return(bo1,bo2)
def fromptseg(self,pt,seg):
""" creates a Cone from one point and one segment
Parameters
----------
pt : nd.array (,2)
seg : nd.array (2,2)
"""
self.apex = pt
a = seg[:,0]
b = seg[:,1]
v0 = b - pt
v1 = a - pt
v0n = v0/np.sqrt(np.dot(v0,v0))
v1n = v1/np.sqrt(np.dot(v1,v1))
if np.cross(v0n,v1n) > 0:
self.u = v0n
self.v = v1n
self.seg1 = seg
else:
self.u = v1n
self.v = v0n
self.seg1 = seg[:,::-1]
self.dot = np.dot(self.u,self.v)
self.cross = np.cross(self.u,self.v)
if self.cross < 1e-15:
self.degenerated=True
self.upd_angle()
def from2segs(self,seg0,seg1):
""" creates a Cone from 2 segments
Parameters
----------
seg0 : 2 x 2 (Ndim x Npoints)
seg1 : 2 x 2
Notes
-----
The only way for the cone to be degenerated is when the two segments are on the same line.
See Also
--------
pylayers.gis.layout.Layout.buildGi
"""
# bv : (4,1)
self.seg0 = seg0
self.seg1 = seg1
a0 = seg0[:,0]
b0 = seg0[:,1]
a1 = seg1[:,0]
b1 = seg1[:,1]
# check for connected segments (This could be determined earlier)
# a0 = a1 | b1
# b0 = a1 | b1
# check segment orientation (crossing)
if not (geu.ccw(a0,b0,b1) ^
geu.ccw(b0,b1,a1) ):
v0 = (b1 - a0)
v1 = (a1 - b0)
twisted = True
else:
v0 = (a1 - a0)
v1 = (b1 - b0)
twisted = False
v0n = v0/np.sqrt(np.dot(v0,v0))
v1n = v1/np.sqrt(np.dot(v1,v1))
if np.cross(v0n,v1n) > 0:
self.u = v0n
self.v = v1n
inversion = False
else:
self.u = v1n
self.v = v0n
inversion = True
if (not twisted) & (not inversion) :
#reverse seg1
#print "reverse seg1"
self.seg1 = self.seg1[:,::-1]
if (inversion) & (not twisted):
#reverse seg0
#print "reverse seg0"
self.seg0 = self.seg0[:,::-1]
if twisted & inversion:
#reverse seg0 and seg1
#print "reverse seg0"
#print "reverse seg1"
self.seg0 = self.seg0[:,::-1]
self.seg1 = self.seg1[:,::-1]
self.dot = np.dot(self.u,self.v)
self.cross = np.cross(self.u,self.v)
if self.cross < 1e-15:
self.degenerated=True
else:
a0u = np.dot(self.seg0[:,0],self.u)
a0v = np.dot(self.seg0[:,0],self.v)
b0u = np.dot(self.seg0[:,1],self.u)
b0v = np.dot(self.seg0[:,1],self.v)
kb = ((b0v-a0v)-self.dot*(b0u-a0u))/(self.dot*self.dot-1)
self.apex = self.seg0[:,1] + kb*self.v
self.upd_angle()
def from2csegs(self,seg0,seg1):
""" creates a Cone from 2 connected segments
Parameters
----------
seg0 : 2 x 2 (Ndim x Npoints)
seg1 : 2 x 2
Notes
-----
The only way for the cone to be degenerated is when the two segments are on the same line.
Examples
--------
>>> from pylayers.util.cone import *
>>> import matplotlib.pyplot as plt
>>> cn = Cone()
>>> f,a = cn.show()
>>> plt.show()
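A connected-segments sketch (illustrative values, the two
segments share the point (0,1)):
>>> import numpy as np
>>> seg0 = np.array([[0., 0.], [0., 1.]])
>>> seg1 = np.array([[0., 1.], [1., 1.]])
>>> cn.from2csegs(seg0, seg1)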
"""
# bv : (4,1)
self.seg0 = seg0
self.seg1 = seg1
a0 = seg0[:,0]
b0 = seg0[:,1]
a1 = seg1[:,0]
b1 = seg1[:,1]
# determine common point
if (np.dot(a0-a1,a0-a1)<1e-8):
p = a0
u = b1-p
v = p-b0
elif (np.dot(a0-b1,a0-b1)<1e-8):
p = a0
u = a1-p
v = p-b0
self.seg1 = self.seg1[:,::-1]
elif (np.dot(b0-a1,b0-a1)<1e-8):
p = b0
self.seg0 = self.seg0[:,::-1]
u = b1-p
v = p-a0
elif (np.dot(b0-b1,b0-b1)<1e-8):
self.seg0 = self.seg0[:,::-1]
self.seg1 = self.seg1[:,::-1]
p = b0
u = a1-p
v = p-a0
else:
logging.critical('segments are not connected')
pdb.set_trace()
self.apex = p
self.v = v/np.sqrt(np.dot(v,v))
self.u = u/np.sqrt(np.dot(u,u))
self.dot = np.dot(self.u,self.v)
self.cross = np.cross(self.u,self.v)
if self.cross<0:
self.u , self.v = self.v , self.u
self.dot = np.dot(self.u,self.v)
self.cross = np.cross(self.u,self.v)
if self.cross < 1e-15:
self.degenerated=True
self.upd_angle()
def show(self, **kwargs):
""" show cone
Parameters
----------
length : float
"""
defaults = {'length': 15.}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if 'seg1' not in self.__dict__:
verts = [tuple(self.apex),
tuple(self.apex + kwargs['length'] * self.u),
tuple(self.apex + kwargs['length'] * self.v),
tuple(self.apex)
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
else:
a1 = self.seg1[:,0]
b1 = self.seg1[:,1]
if 'seg0' not in self.__dict__:
a0 = self.apex
b0 = self.apex
else:
a0 = self.seg0[:,0]
b0 = self.seg0[:,1]
if not(self.degenerated):
#verts = [tuple(self.apex),
# tuple(a1),
# tuple(b1),
# tuple(self.apex)
# ]
verts = [tuple(self.apex),
tuple(self.apex + kwargs['length'] * self.u),
tuple(self.apex + kwargs['length'] * self.v),
tuple(self.apex)
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
else:
if (geu.ccw(a0,b0,b1) ^
geu.ccw(b0,b1,a1) ):
verts = [tuple(b0),
tuple(a1),
tuple(b1),
tuple(a0),
tuple(b0)
]
else:
verts = [tuple(b0),
tuple(b1),
tuple(a1),
tuple(a0),
tuple(b0)
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
if 'fig' not in kwargs:
fig = plt.figure(figsize=(10,10))
else:
fig = kwargs['fig']
if 'ax' not in kwargs:
ax = fig.add_subplot(111)
else:
ax = kwargs['ax']
ax.plot([self.apex[0],self.apex[0]+kwargs['length']*self.u[0]],
[self.apex[1],self.apex[1]+kwargs['length']*self.u[1]],lw=1,color='b')
ax.plot([self.apex[0],self.apex[0]+kwargs['length']*self.v[0]],
[self.apex[1],self.apex[1]+kwargs['length']*self.v[1]],lw=1,color='r')
theta1 = np.arctan2(self.u[1],self.u[0])*180/np.pi
#print theta1
theta2 = np.arctan2(self.v[1],self.v[0])*180/np.pi
#print theta2
angle = self.angle*180/np.pi
#print angle
arc = patches.Arc((self.apex[0],self.apex[1]),kwargs['length'],kwargs['length'],theta1=theta1,theta2=theta2,linewidth=2)
ax.add_patch(arc)
if 'seg0' in self.__dict__:
ax.plot([a0[0],b0[0]],[a0[1],b0[1]],lw=2,color='b')
if 'seg1' in self.__dict__:
ax.plot([a1[0],b1[0]],[a1[1],b1[1]],lw=2,color='r')
patch = patches.PathPatch(path, facecolor='orange', lw=2, alpha=0.3)
ax.add_patch(patch)
ax.axis('equal')
# ax.set_xlim(-2,2)
# ax.set_ylim(-2,2)
return(fig, ax)
if __name__ == '__main__':
plt.ion()
doctest.testmod()
|
lgpl-3.0
| 8,532,794,341,175,884,000
| 27.395251
| 128
| 0.4471
| false
| 3.071148
| false
| false
| false
|
danylaksono/inasafe
|
safe/messaging/item/row.py
|
1
|
2919
|
"""
InaSAFE Disaster risk assessment tool developed by AusAid - **Row**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'marco@opengis.ch'
__revision__ = '$Format:%H$'
__date__ = '04/06/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from message_element import MessageElement
from exceptions import InvalidMessageItemError
from cell import Cell
#FIXME (MB) remove when all to_* methods are implemented
#pylint: disable=W0223
class Row(MessageElement):
"""A class to model table rows in the messaging system """
def __init__(self, *args, **kwargs):
"""Creates a row object
Args:
args : each element can be a string, a list or a Cell
Returns:
None
Raises:
Errors are propagated
We pass the kwargs on to the base class so an exception is raised
if invalid keywords were passed. See:
http://stackoverflow.com/questions/13124961/
how-to-pass-arguments-efficiently-kwargs-in-python
"""
super(Row, self).__init__(**kwargs)
self.cells = []
for arg in args:
self.add(arg)
def add(self, item):
"""add a Cell to the row
lists can be passed and are automatically converted to Cells
Args:
item: an element to add to the cells; can be a string, a list
or a Cell object
Returns:
None
Raises:
Errors are propagated
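Example (a minimal sketch; a plain string becomes one Cell and a
list is expanded into one Cell per entry):
row = Row()
row.add('header')
row.add(['col 1', 'col 2'])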
"""
if isinstance(item, basestring) or self._is_qstring(item):
self.cells.append(Cell(item))
elif isinstance(item, Cell):
self.cells.append(item)
elif isinstance(item, list):
for i in item:
self.cells.append(Cell(i))
else:
raise InvalidMessageItemError(item, item.__class__)
def to_html(self):
"""Render a Text MessageElement as html
Args:
None
Returns:
Str the html representation of the Row MessageElement
Raises:
Errors are propagated
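Example (illustrative only; the exact cell markup is produced by
Cell.to_html):
Row('a', 'b').to_html() returns a string shaped like
'<tr ...>\n<cell html><cell html></tr>\n'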
"""
row = '<tr%s>\n' % self.html_attributes()
for cell in self.cells:
row += cell.to_html()
row += '</tr>\n'
return row
def to_text(self):
"""Render a Text MessageElement as plain text
Args:
None
Returns:
Str the plain text representation of the Row MessageElement
Raises:
Errors are propagated
"""
row = '---\n'
for cell in self.cells:
# cells are Cell objects, not plain strings: render them
row += cell.to_text()
row += '---'
return row
|
gpl-3.0
| 1,648,468,602,593,973,200
| 24.605263
| 78
| 0.578623
| false
| 4.382883
| false
| false
| false
|
rahulunair/nova
|
nova/tests/unit/api/openstack/compute/test_serversV21.py
|
1
|
378354
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import datetime
import ddt
import functools
import fixtures
import iso8601
import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import base64
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
import testtools
import webob
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack import compute
from nova.api.openstack.compute import ips
from nova.api.openstack.compute.schemas import servers as servers_schema
from nova.api.openstack.compute import servers
from nova.api.openstack.compute import views
from nova.api.openstack import wsgi as os_wsgi
from nova import availability_zones
from nova import block_device
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import context
from nova.db import api as db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import models
from nova import exception
from nova.image import glance
from nova import objects
from nova.objects import instance as instance_obj
from nova.objects.instance_group import InstanceGroup
from nova.objects import tag
from nova.policies import servers as server_policies
from nova import policy
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova import utils as nova_utils
CONF = nova.conf.CONF
FAKE_UUID = fakes.FAKE_UUID
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
INSTANCE_IDS = {FAKE_UUID: 1}
FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
GET_ONLY_FIELDS = ['OS-EXT-AZ:availability_zone', 'config_drive',
'OS-EXT-SRV-ATTR:host',
'OS-EXT-SRV-ATTR:hypervisor_hostname',
'OS-EXT-SRV-ATTR:instance_name',
'OS-EXT-SRV-ATTR:hostname',
'OS-EXT-SRV-ATTR:kernel_id',
'OS-EXT-SRV-ATTR:launch_index',
'OS-EXT-SRV-ATTR:ramdisk_id',
'OS-EXT-SRV-ATTR:reservation_id',
'OS-EXT-SRV-ATTR:root_device_name',
'OS-EXT-SRV-ATTR:user_data', 'host_status',
'key_name', 'OS-SRV-USG:launched_at',
'OS-SRV-USG:terminated_at',
'OS-EXT-STS:task_state', 'OS-EXT-STS:vm_state',
'OS-EXT-STS:power_state', 'security_groups',
'os-extended-volumes:volumes_attached']
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None,
):
inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
name=values.get('display_name'))
inst = dict(inst, **values)
return (inst, inst)
def instance_update(context, instance_uuid, values):
inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
name=values.get('display_name'))
inst = dict(inst, **values)
return inst
def fake_compute_api(cls, req, id):
return True
def fake_start_stop_not_ready(self, context, instance):
raise exception.InstanceNotReady(instance_id=instance["uuid"])
def fake_start_stop_invalid_state(self, context, instance):
raise exception.InstanceInvalidState(
instance_uuid=instance['uuid'], attr='fake_attr',
method='fake_method', state='fake_state')
def fake_instance_get_by_uuid_not_found(context, uuid,
columns_to_join, use_slave=False):
raise exception.InstanceNotFound(instance_id=uuid)
def fake_instance_get_all_with_locked(context, list_locked, **kwargs):
obj_list = []
s_id = 0
for locked in list_locked:
uuid = fakes.get_fake_uuid(locked)
s_id = s_id + 1
kwargs['locked_by'] = None if locked == 'not_locked' else locked
server = fakes.stub_instance_obj(context, id=s_id, uuid=uuid, **kwargs)
obj_list.append(server)
return objects.InstanceList(objects=obj_list)
def fake_instance_get_all_with_description(context, list_desc, **kwargs):
obj_list = []
s_id = 0
for desc in list_desc:
uuid = fakes.get_fake_uuid(desc)
s_id = s_id + 1
kwargs['display_description'] = desc
server = fakes.stub_instance_obj(context, id=s_id, uuid=uuid, **kwargs)
obj_list.append(server)
return objects.InstanceList(objects=obj_list)
def fake_compute_get_empty_az(*args, **kwargs):
inst = fakes.stub_instance(vm_state=vm_states.ACTIVE,
availability_zone='')
return fake_instance.fake_instance_obj(args[1], **inst)
def fake_bdms_get_all_by_instance_uuids(*args, **kwargs):
return [
fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'volume_id': 'some_volume_1',
'instance_uuid': FAKE_UUID,
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': True,
}),
fake_block_device.FakeDbBlockDeviceDict({
'id': 2,
'volume_id': 'some_volume_2',
'instance_uuid': FAKE_UUID,
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': False,
}),
]
def fake_get_inst_mappings_by_instance_uuids_from_db(*args, **kwargs):
return [{
'id': 1,
'instance_uuid': UUID1,
'cell_mapping': {
'id': 1, 'uuid': uuids.cell1, 'name': 'fake',
'transport_url': 'fake://nowhere/', 'updated_at': None,
'database_connection': uuids.cell1, 'created_at': None,
'disabled': False},
'project_id': 'fake-project'
}]
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance_id, password):
self.instance_id = instance_id
self.password = password
class ControllerTest(test.TestCase):
project_id = fakes.FAKE_PROJECT_ID
path = '/%s/servers' % project_id
path_v2 = '/v2' + path
path_with_id = path + '/%s'
path_with_id_v2 = path_v2 + '/%s'
path_with_query = path + '?%s'
path_detail = path + '/detail'
path_detail_v2 = path_v2 + '/detail'
path_detail_with_query = path_detail + '?%s'
path_action = path + '/%s/action'
def setUp(self):
super(ControllerTest, self).setUp()
fakes.stub_out_nw_api(self)
fakes.stub_out_key_pair_funcs(self)
fake.stub_out_image_service(self)
fakes.stub_out_secgroup_api(
self, security_groups=[{'name': 'default'}])
return_server = fakes.fake_compute_get(id=2, availability_zone='nova',
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
return_servers = fakes.fake_compute_get_all()
# Server sort keys extension is enabled in v21 so sort data is passed
# to the instance API and the sorted DB API is invoked
self.mock_get_all = self.useFixture(fixtures.MockPatchObject(
compute_api.API, 'get_all', side_effect=return_servers)).mock
self.mock_get = self.useFixture(fixtures.MockPatchObject(
compute_api.API, 'get', side_effect=return_server)).mock
self.stub_out('nova.db.api.instance_update_and_get_original',
instance_update_and_get_original)
self.stub_out('nova.db.api.'
'block_device_mapping_get_all_by_instance_uuids',
fake_bdms_get_all_by_instance_uuids)
self.stub_out('nova.objects.InstanceMappingList.'
'_get_by_instance_uuids_from_db',
fake_get_inst_mappings_by_instance_uuids_from_db)
self.flags(group='glance', api_servers=['http://localhost:9292'])
self.controller = servers.ServersController()
self.ips_controller = ips.IPsController()
policy.reset()
policy.init()
self.addCleanup(policy.reset)
# Assume that anything that hits the compute API and looks for a
# RequestSpec doesn't care about it, since testing logic that deep
# should be done in nova.tests.unit.compute.test_compute_api.
mock_reqspec = mock.patch('nova.objects.RequestSpec')
mock_reqspec.start()
self.addCleanup(mock_reqspec.stop)
# Similarly we shouldn't care about anything hitting conductor from
# these tests.
mock_conductor = mock.patch.object(
self.controller.compute_api, 'compute_task_api')
mock_conductor.start()
self.addCleanup(mock_conductor.stop)
class ServersControllerTest(ControllerTest):
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
def req(self, url, use_admin_context=False):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context,
version=self.wsgi_api_version)
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
def test_instance_lookup_targets(self, mock_get_im, mock_get_inst):
ctxt = context.RequestContext('fake', self.project_id)
mock_get_im.return_value.cell_mapping.database_connection = uuids.cell1
self.controller._get_instance(ctxt, 'foo')
mock_get_im.assert_called_once_with(ctxt, 'foo')
self.assertIsNotNone(ctxt.db_connection)
def test_requested_networks_prefix(self):
"""Tests that we no longer support the legacy br-<uuid> format for
a network id.
"""
uuid = 'br-00000000-0000-0000-0000-000000000000'
requested_networks = [{'uuid': uuid}]
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
self.assertIn('Bad networks format: network uuid is not in proper '
'format', six.text_type(ex))
def test_requested_networks_enabled_with_port(self):
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port, None)], res.as_tuples())
def test_requested_networks_enabled_with_network(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(network, None, None, None)], res.as_tuples())
def test_requested_networks_enabled_with_network_and_port(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port, None)], res.as_tuples())
def test_requested_networks_with_and_duplicate_networks(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
requested_networks = [{'uuid': network}, {'uuid': network}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(network, None, None, None),
(network, None, None, None)], res.as_tuples())
def test_requested_networks_enabled_conflict_on_fixed_ip(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
addr = '10.0.0.1'
requested_networks = [{'uuid': network,
'fixed_ip': addr,
'port': port}]
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller._get_requested_networks,
requested_networks)
def test_requested_networks_api_enabled_with_v2_subclass(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}]
res = self.controller._get_requested_networks(requested_networks)
self.assertEqual([(None, None, port, None)], res.as_tuples())
def test_get_server_by_uuid(self):
req = self.req(self.path_with_id % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
def test_get_server_joins(self):
def fake_get(*args, **kwargs):
expected_attrs = kwargs['expected_attrs']
self.assertEqual(['flavor', 'info_cache', 'metadata',
'numa_topology'], expected_attrs)
ctxt = context.RequestContext('fake', self.project_id)
return fake_instance.fake_instance_obj(
ctxt, expected_attrs=expected_attrs)
self.mock_get.side_effect = fake_get
req = self.req(self.path_with_id % FAKE_UUID)
self.controller.show(req, FAKE_UUID)
def test_unique_host_id(self):
"""Create two servers with the same host and different
project_ids and check that the host_id's are unique.
"""
def return_instance_with_host(context, *args, **kwargs):
project_id = uuidutils.generate_uuid()
return fakes.stub_instance_obj(context, id=1, uuid=FAKE_UUID,
project_id=project_id,
host='fake_host')
req = self.req(self.path_with_id % FAKE_UUID)
self.mock_get.side_effect = return_instance_with_host
server1 = self.controller.show(req, FAKE_UUID)
server2 = self.controller.show(req, FAKE_UUID)
self.assertNotEqual(server1['server']['hostId'],
server2['server']['hostId'])
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100):
return {
"server": {
"id": uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": progress,
"name": "server2",
"status": status,
"hostId": '',
"image": {
"id": "10",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "2",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
]
},
"metadata": {
"seq": "2",
},
"links": [
{
"rel": "self",
"href": "http://localhost%s/%s" % (self.path_v2, uuid),
},
{
"rel": "bookmark",
"href": "http://localhost%s/%s" % (self.path, uuid),
},
],
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": '',
"accessIPv6": '',
"OS-EXT-AZ:availability_zone": "nova",
"config_drive": None,
"OS-EXT-SRV-ATTR:host": None,
"OS-EXT-SRV-ATTR:hypervisor_hostname": None,
"OS-EXT-SRV-ATTR:instance_name": "instance-00000002",
"key_name": '',
"OS-SRV-USG:launched_at": None,
"OS-SRV-USG:terminated_at": None,
"security_groups": [{'name': 'default'}],
"OS-EXT-STS:task_state": None,
"OS-EXT-STS:vm_state": vm_states.ACTIVE,
"OS-EXT-STS:power_state": 1,
"os-extended-volumes:volumes_attached": [
{'id': 'some_volume_1'},
{'id': 'some_volume_2'},
]
}
}
def test_get_server_by_id(self):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
uuid = FAKE_UUID
req = self.req(self.path_with_id_v2 % uuid)
res_dict = self.controller.show(req, uuid)
expected_server = self._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
progress=0)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_empty_az(self):
self.mock_get.side_effect = fakes.fake_compute_get(
availability_zone='')
uuid = FAKE_UUID
req = self.req(self.path_with_id_v2 % uuid)
res_dict = self.controller.show(req, uuid)
self.assertEqual(res_dict['server']['OS-EXT-AZ:availability_zone'], '')
def test_get_server_with_active_status_by_id(self):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
req = self.req(self.path_with_id % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
self.mock_get.assert_called_once_with(
req.environ['nova.context'], FAKE_UUID,
expected_attrs=['flavor', 'info_cache', 'metadata',
'numa_topology'], cell_down_support=False)
def test_get_server_with_id_image_ref_by_id(self):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
req = self.req(self.path_with_id % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
self.mock_get.assert_called_once_with(
req.environ['nova.context'], FAKE_UUID,
expected_attrs=['flavor', 'info_cache', 'metadata',
'numa_topology'], cell_down_support=False)
def _generate_nw_cache_info(self):
pub0 = ('172.19.0.1', '172.19.0.2',)
pub1 = ('1.2.3.4',)
pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
priv0 = ('192.168.0.3', '192.168.0.4',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'public',
'subnets': [{'cidr': '172.19.0.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': '1.2.3.0/16',
'ips': [_ip(ip) for ip in pub1]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub2]}]}},
{'address': 'bb:bb:bb:bb:bb:bb',
'id': 2,
'network': {'bridge': 'br1',
'id': 2,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip(ip) for ip in priv0]}]}}]
return nw_cache
def test_get_server_addresses_from_cache(self):
nw_cache = self._generate_nw_cache_info()
self.mock_get.side_effect = fakes.fake_compute_get(nw_cache=nw_cache,
availability_zone='nova')
req = self.req((self.path_with_id % FAKE_UUID) + '/ips')
res_dict = self.ips_controller.index(req, FAKE_UUID)
expected = {
'addresses': {
'private': [
{'version': 4, 'addr': '192.168.0.3'},
{'version': 4, 'addr': '192.168.0.4'},
],
'public': [
{'version': 4, 'addr': '172.19.0.1'},
{'version': 4, 'addr': '172.19.0.2'},
{'version': 4, 'addr': '1.2.3.4'},
{'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
],
},
}
self.assertThat(res_dict, matchers.DictMatches(expected))
self.mock_get.assert_called_once_with(
req.environ['nova.context'], FAKE_UUID,
expected_attrs=None, cell_down_support=False)
# Make sure we kept the addresses in order
self.assertIsInstance(res_dict['addresses'], collections.OrderedDict)
labels = [vif['network']['label'] for vif in nw_cache]
for index, label in enumerate(res_dict['addresses'].keys()):
self.assertEqual(label, labels[index])
def test_get_server_addresses_nonexistent_network(self):
url = ((self.path_with_id_v2 % FAKE_UUID) + '/ips/network_0')
req = self.req(url)
self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
req, FAKE_UUID, 'network_0')
def test_get_server_addresses_nonexistent_server(self):
self.mock_get.side_effect = exception.InstanceNotFound(
instance_id='fake')
req = self.req((self.path_with_id % uuids.fake) + '/ips')
self.assertRaises(webob.exc.HTTPNotFound,
self.ips_controller.index, req, uuids.fake)
self.mock_get.assert_called_once_with(
req.environ['nova.context'], uuids.fake, expected_attrs=None,
cell_down_support=False)
def test_show_server_hide_addresses_in_building(self):
uuid = FAKE_UUID
self.mock_get.side_effect = fakes.fake_compute_get(
uuid=uuid, vm_state=vm_states.BUILDING)
req = self.req(self.path_with_id_v2 % uuid)
res_dict = self.controller.show(req, uuid)
self.assertEqual({}, res_dict['server']['addresses'])
def test_show_server_addresses_in_non_building(self):
uuid = FAKE_UUID
nw_cache = self._generate_nw_cache_info()
expected = {
'addresses': {
'private': [
{'version': 4, 'addr': '192.168.0.3',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'},
{'version': 4, 'addr': '192.168.0.4',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'},
],
'public': [
{'version': 4, 'addr': '172.19.0.1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '172.19.0.2',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '1.2.3.4',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
],
},
}
self.mock_get.side_effect = fakes.fake_compute_get(
nw_cache=nw_cache, uuid=uuid, vm_state=vm_states.ACTIVE)
req = self.req(self.path_with_id_v2 % uuid)
res_dict = self.controller.show(req, uuid)
self.assertThat(res_dict['server']['addresses'],
matchers.DictMatches(expected['addresses']))
def test_detail_server_hide_addresses(self):
nw_cache = self._generate_nw_cache_info()
expected = {
'addresses': {
'private': [
{'version': 4, 'addr': '192.168.0.3',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'},
{'version': 4, 'addr': '192.168.0.4',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'},
],
'public': [
{'version': 4, 'addr': '172.19.0.1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '172.19.0.2',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '1.2.3.4',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
],
},
}
def fake_get_all(context, **kwargs):
return objects.InstanceList(
objects=[fakes.stub_instance_obj(1,
vm_state=vm_states.BUILDING,
uuid=uuids.fake,
nw_cache=nw_cache),
fakes.stub_instance_obj(2,
vm_state=vm_states.ACTIVE,
uuid=uuids.fake2,
nw_cache=nw_cache)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'deleted=true',
use_admin_context=True)
servers = self.controller.detail(req)['servers']
for server in servers:
if server['OS-EXT-STS:vm_state'] == 'building':
self.assertEqual({}, server['addresses'])
else:
self.assertThat(server['addresses'],
matchers.DictMatches(expected['addresses']))
def test_get_server_list_empty(self):
self.mock_get_all.side_effect = None
self.mock_get_all.return_value = objects.InstanceList(objects=[])
req = self.req(self.path)
res_dict = self.controller.index(req)
self.assertEqual(0, len(res_dict['servers']))
self.mock_get_all.assert_called_once_with(
req.environ['nova.context'], expected_attrs=[], limit=1000,
marker=None, search_opts={'deleted': False,
'project_id': self.project_id},
sort_dirs=['desc'], sort_keys=['created_at'],
cell_down_support=False, all_tenants=False)
def test_get_server_list_with_reservation_id(self):
req = self.req(self.path_with_query % 'reservation_id=foo')
res_dict = self.controller.index(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list_with_reservation_id_empty(self):
req = self.req(self.path_detail_with_query % 'reservation_id=foo')
res_dict = self.controller.detail(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list_with_reservation_id_details(self):
req = self.req(self.path_detail_with_query % 'reservation_id=foo')
res_dict = self.controller.detail(req)
i = 0
for s in res_dict['servers']:
self.assertEqual(s.get('name'), 'server%d' % (i + 1))
i += 1
def test_get_server_list(self):
req = self.req(self.path)
res_dict = self.controller.index(req)
self.assertEqual(len(res_dict['servers']), 5)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['name'], 'server%d' % (i + 1))
self.assertIsNone(s.get('image', None))
expected_links = [
{
"rel": "self",
"href": "http://localhost" + (
self.path_with_id_v2 % s['id']),
},
{
"rel": "bookmark",
"href": "http://localhost" + (
self.path_with_id % s['id']),
},
]
self.assertEqual(s['links'], expected_links)
def test_get_servers_with_limit(self):
req = self.req(self.path_with_query % 'limit=3')
res_dict = self.controller.index(req)
servers = res_dict['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res_dict['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v2' + self.path,
href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected_params = {'limit': ['3'],
'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected_params))
def test_get_servers_with_limit_bad_value(self):
req = self.req(self.path_with_query % 'limit=aaa')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_server_details_empty(self):
self.mock_get_all.side_effect = None
self.mock_get_all.return_value = objects.InstanceList(objects=[])
req = self.req(self.path_detail)
expected_attrs = ['flavor', 'info_cache', 'metadata']
if api_version_request.is_supported(req, '2.16'):
expected_attrs.append('services')
res_dict = self.controller.detail(req)
self.assertEqual(0, len(res_dict['servers']))
self.mock_get_all.assert_called_once_with(
req.environ['nova.context'],
expected_attrs=sorted(expected_attrs),
limit=1000, marker=None,
search_opts={'deleted': False, 'project_id': self.project_id},
sort_dirs=['desc'], sort_keys=['created_at'],
cell_down_support=False, all_tenants=False)
def test_get_server_details_with_bad_name(self):
req = self.req(self.path_detail_with_query % 'name=%2Binstance')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_server_details_with_limit(self):
req = self.req(self.path_detail_with_query % 'limit=3')
res = self.controller.detail(req)
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual(self.path_detail_v2, href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected))
def test_get_server_details_with_limit_bad_value(self):
req = self.req(self.path_detail_with_query % 'limit=aaa')
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_get_server_details_with_limit_and_other_params(self):
req = self.req(self.path_detail_with_query %
'limit=3&blah=2:t&sort_key=uuid&sort_dir=asc')
res = self.controller.detail(req)
servers = res['servers']
self.assertEqual([s['id'] for s in servers],
[fakes.get_fake_uuid(i) for i in range(len(servers))])
servers_links = res['servers_links']
self.assertEqual(servers_links[0]['rel'], 'next')
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual(self.path_detail_v2, href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'],
'sort_key': ['uuid'], 'sort_dir': ['asc'],
'marker': [fakes.get_fake_uuid(2)]}
self.assertThat(params, matchers.DictMatches(expected))
def test_get_servers_with_too_big_limit(self):
req = self.req(self.path_with_query % 'limit=30')
res_dict = self.controller.index(req)
self.assertNotIn('servers_links', res_dict)
def test_get_servers_with_bad_limit(self):
req = self.req(self.path_with_query % 'limit=asdf')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_servers_with_marker(self):
url = '%s?marker=%s' % (self.path_v2, fakes.get_fake_uuid(2))
req = self.req(url)
servers = self.controller.index(req)['servers']
self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
def test_get_servers_with_limit_and_marker(self):
url = '%s?limit=2&marker=%s' % (self.path_v2,
fakes.get_fake_uuid(1))
req = self.req(url)
servers = self.controller.index(req)['servers']
self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
def test_get_servers_with_bad_marker(self):
req = self.req(self.path_with_query % 'limit=2&marker=asdf')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_servers_with_invalid_filter_param(self):
req = self.req(self.path_with_query % 'info_cache=asdf',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
req = self.req(self.path_with_query % '__foo__=asdf',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_servers_with_invalid_regex_filter_param(self):
req = self.req(self.path_with_query % 'flavor=[[[',
use_admin_context=True)
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_servers_with_empty_regex_filter_param(self):
req = self.req(self.path_with_query % 'flavor=',
use_admin_context=True)
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_servers_detail_with_empty_regex_filter_param(self):
req = self.req(self.path_detail_with_query % 'flavor=',
use_admin_context=True)
self.assertRaises(exception.ValidationError,
self.controller.detail, req)
def test_get_servers_invalid_sort_key(self):
# "hidden" is a real field for instances but not exposed in the API.
req = self.req(self.path_with_query %
'sort_key=hidden&sort_dir=desc')
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_get_servers_ignore_sort_key(self):
req = self.req(self.path_with_query %
'sort_key=vcpus&sort_dir=asc')
self.controller.index(req)
self.mock_get_all.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=[], sort_dirs=[],
cell_down_support=False, all_tenants=False)
def test_get_servers_ignore_locked_sort_key(self):
# Prior to microversion 2.73 locked sort key is ignored.
req = self.req(self.path_with_query %
'sort_key=locked&sort_dir=asc')
self.controller.detail(req)
self.mock_get_all.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=[], sort_dirs=[],
cell_down_support=False, all_tenants=False)
def test_get_servers_ignore_sort_key_only_one_dir(self):
req = self.req(self.path_with_query %
'sort_key=user_id&sort_key=vcpus&sort_dir=asc')
self.controller.index(req)
self.mock_get_all.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=['user_id'],
sort_dirs=['asc'], cell_down_support=False, all_tenants=False)
def test_get_servers_ignore_sort_key_with_no_sort_dir(self):
req = self.req(self.path_with_query %
'sort_key=vcpus&sort_key=user_id')
self.controller.index(req)
self.mock_get_all.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=['user_id'], sort_dirs=[],
cell_down_support=False, all_tenants=False)
def test_get_servers_ignore_sort_key_with_bad_sort_dir(self):
req = self.req(self.path_with_query %
'sort_key=vcpus&sort_dir=bad_dir')
self.controller.index(req)
self.mock_get_all.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=[], sort_dirs=[],
cell_down_support=False, all_tenants=False)
def test_get_servers_non_admin_with_admin_only_sort_key(self):
req = self.req(self.path_with_query %
'sort_key=host&sort_dir=desc')
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.index, req)
def test_get_servers_admin_with_admin_only_sort_key(self):
req = self.req(self.path_with_query %
'sort_key=node&sort_dir=desc',
use_admin_context=True)
self.controller.detail(req)
self.mock_get_all.assert_called_once_with(
mock.ANY, search_opts=mock.ANY, limit=mock.ANY, marker=mock.ANY,
expected_attrs=mock.ANY, sort_keys=['node'], sort_dirs=['desc'],
cell_down_support=False, all_tenants=False)
def test_get_servers_with_bad_option(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
db_list = [fakes.stub_instance(100, uuid=uuids.fake)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'unknownoption=whee')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
self.mock_get_all.assert_called_once_with(
req.environ['nova.context'], expected_attrs=[],
limit=1000, marker=None,
search_opts={'deleted': False, 'project_id': self.project_id},
sort_dirs=['desc'], sort_keys=['created_at'],
cell_down_support=False, all_tenants=False)
def test_get_servers_with_locked_filter(self):
# Prior to microversion 2.73 locked filter parameter is ignored.
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
db_list = [fakes.stub_instance(100, uuid=uuids.fake)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'locked=true')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
self.mock_get_all.assert_called_once_with(
req.environ['nova.context'], expected_attrs=[],
limit=1000, marker=None,
search_opts={'deleted': False, 'project_id': self.project_id},
sort_dirs=['desc'], sort_keys=['created_at'],
cell_down_support=False, all_tenants=False)
def test_get_servers_allows_image(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('image', search_opts)
self.assertEqual(search_opts['image'], '12345')
db_list = [fakes.stub_instance(100, uuid=uuids.fake)]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'image=12345')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_tenant_id_filter_no_admin_context(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
self.assertNotIn('tenant_id', search_opts)
self.assertEqual(self.project_id, search_opts['project_id'])
return [fakes.stub_instance_obj(100)]
req = self.req(self.path_with_query % 'tenant_id=newfake')
self.mock_get_all.side_effect = fake_get_all
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_tenant_id_filter_admin_context(self):
""""Test tenant_id search opt is dropped if all_tenants is not set."""
def fake_get_all(context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
self.assertNotIn('tenant_id', search_opts)
self.assertEqual(self.project_id, search_opts['project_id'])
return [fakes.stub_instance_obj(100)]
req = self.req(self.path_with_query % 'tenant_id=newfake',
use_admin_context=True)
self.mock_get_all.side_effect = fake_get_all
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_normal(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertNotIn('project_id', search_opts)
return [fakes.stub_instance_obj(100)]
req = self.req(self.path_with_query % 'all_tenants',
use_admin_context=True)
self.mock_get_all.side_effect = fake_get_all
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
def test_all_tenants_param_one(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertNotIn('project_id', search_opts)
return [fakes.stub_instance_obj(100)]
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'all_tenants=1',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
def test_all_tenants_param_zero(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertNotIn('all_tenants', search_opts)
return [fakes.stub_instance_obj(100)]
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'all_tenants=0',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
def test_all_tenants_param_false(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertNotIn('all_tenants', search_opts)
return [fakes.stub_instance_obj(100)]
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'all_tenants=false',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
def test_all_tenants_param_invalid(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertNotIn('all_tenants', search_opts)
return [fakes.stub_instance_obj(100)]
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'all_tenants=xxx',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_admin_restricted_tenant(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
self.assertEqual(search_opts['project_id'], self.project_id)
return [fakes.stub_instance_obj(100)]
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path, use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
def test_all_tenants_pass_policy(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
self.assertNotIn('project_id', search_opts)
self.assertTrue(context.is_admin)
return [fakes.stub_instance_obj(100)]
self.mock_get_all.side_effect = fake_get_all
rules = {
"os_compute_api:servers:index": "project_id:%s" % self.project_id,
"os_compute_api:servers:index:get_all_tenants":
"project_id:%s" % self.project_id
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
req = self.req(self.path_with_query % 'all_tenants=1')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
def test_all_tenants_fail_policy(self):
def fake_get_all(context, search_opts=None, **kwargs):
self.assertIsNotNone(search_opts)
return [fakes.stub_instance_obj(100)]
rules = {
"os_compute_api:servers:index:get_all_tenants":
"project_id:non_fake",
"os_compute_api:servers:get_all":
"project_id:%s" % self.project_id,
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'all_tenants=1')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_get_servers_allows_flavor(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('flavor', search_opts)
# flavor is an integer ID
self.assertEqual(search_opts['flavor'], '12345')
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'flavor=12345')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_with_bad_flavor(self):
req = self.req(self.path_with_query % 'flavor=abcde')
self.mock_get_all.side_effect = None
self.mock_get_all.return_value = objects.InstanceList(objects=[])
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 0)
def test_get_server_details_with_bad_flavor(self):
req = self.req(self.path_with_query % 'flavor=abcde')
self.mock_get_all.side_effect = None
self.mock_get_all.return_value = objects.InstanceList(objects=[])
servers = self.controller.detail(req)['servers']
self.assertThat(servers, testtools.matchers.HasLength(0))
def test_get_servers_allows_status(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('vm_state', search_opts)
self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE])
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'status=active')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_allows_task_status(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('task_state', search_opts)
self.assertEqual([task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING],
search_opts['task_state'])
return objects.InstanceList(
objects=[fakes.stub_instance_obj(
100, uuid=uuids.fake, task_state=task_states.REBOOTING)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'status=reboot')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_resize_status(self):
# Test when resize status, it maps list of vm states.
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIn('vm_state', search_opts)
self.assertEqual(search_opts['vm_state'],
[vm_states.ACTIVE, vm_states.STOPPED])
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'status=resize')
servers = self.controller.detail(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(servers[0]['id'], uuids.fake)
def test_get_servers_invalid_status(self):
# Test getting servers by invalid status.
req = self.req(self.path_with_query % 'status=baloney',
use_admin_context=False)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 0)
def test_get_servers_deleted_status_as_user(self):
req = self.req(self.path_with_query % 'status=deleted',
use_admin_context=False)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.detail, req)
def test_get_servers_deleted_status_as_admin(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIn('vm_state', search_opts)
self.assertEqual(search_opts['vm_state'], ['deleted'])
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'status=deleted',
use_admin_context=True)
servers = self.controller.detail(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_deleted_filter_str_to_bool(self):
db_list = objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake,
vm_state='deleted')])
self.mock_get_all.side_effect = None
self.mock_get_all.return_value = db_list
req = self.req(self.path_with_query % 'deleted=true',
use_admin_context=True)
servers = self.controller.detail(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
# Assert that 'deleted' filter value is converted to boolean
# while calling get_all() method.
expected_search_opts = {'deleted': True, 'project_id': self.project_id}
self.assertEqual(expected_search_opts,
self.mock_get_all.call_args[1]['search_opts'])
def test_get_servers_deleted_filter_invalid_str(self):
db_list = objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = None
self.mock_get_all.return_value = db_list
req = fakes.HTTPRequest.blank(self.path_with_query % 'deleted=abc',
use_admin_context=True)
servers = self.controller.detail(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
# Assert that invalid 'deleted' filter value is converted to boolean
# False while calling get_all() method.
expected_search_opts = {'deleted': False,
'project_id': self.project_id}
self.assertEqual(expected_search_opts,
self.mock_get_all.call_args[1]['search_opts'])
def test_get_servers_allows_name(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('name', search_opts)
self.assertEqual(search_opts['name'], 'whee.*')
self.assertEqual([], expected_attrs)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'name=whee.*')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_flavor_not_found(self):
self.mock_get_all.side_effect = exception.FlavorNotFound(flavor_id=1)
req = fakes.HTTPRequest.blank(
self.path_with_query % 'status=active&flavor=abc')
servers = self.controller.index(req)['servers']
self.assertEqual(0, len(servers))
def test_get_servers_allows_changes_since(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('changes-since', search_opts)
changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
tzinfo=iso8601.iso8601.UTC)
self.assertEqual(search_opts['changes-since'], changes_since)
self.assertNotIn('deleted', search_opts)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
params = 'changes-since=2011-01-24T17:08:01Z'
req = self.req(self.path_with_query % params)
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_allows_changes_since_bad_value(self):
params = 'changes-since=asdf'
req = self.req(self.path_with_query % params)
self.assertRaises(exception.ValidationError, self.controller.index,
req)
def test_get_servers_allows_changes_since_bad_value_on_compat_mode(self):
params = 'changes-since=asdf'
req = self.req(self.path_with_query % params)
req.set_legacy_v2()
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index,
req)
def test_get_servers_admin_filters_as_user(self):
"""Test getting servers by admin-only or unknown options when
context is not admin. Make sure the admin and unknown options
are stripped before they get to compute_api.get_all()
"""
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
# Allowed by user
self.assertIn('name', search_opts)
self.assertIn('ip', search_opts)
# OSAPI converts status to vm_state
self.assertIn('vm_state', search_opts)
# Allowed only by admins with admin API on
self.assertNotIn('unknown_option', search_opts)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
req = fakes.HTTPRequest.blank(self.path_with_query % query_str)
res = self.controller.index(req)
servers = res['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_admin_options_as_admin(self):
"""Test getting servers by admin-only or unknown options when
context is admin. All options should be passed
"""
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
# Allowed by user
self.assertIn('name', search_opts)
self.assertIn('terminated_at', search_opts)
# OSAPI converts status to vm_state
self.assertIn('vm_state', search_opts)
# Allowed only by admins with admin API on
self.assertIn('ip', search_opts)
self.assertNotIn('unknown_option', search_opts)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
query_str = ("name=foo&ip=10.*&status=active&unknown_option=meow&"
"terminated_at=^2016-02-01.*")
req = self.req(self.path_with_query % query_str,
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_admin_filters_as_user_with_policy_override(self):
"""Test getting servers by admin-only or unknown options when
context is not admin but policy allows.
"""
server_uuid = uuids.fake
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
# Allowed by user
self.assertIn('name', search_opts)
self.assertIn('terminated_at', search_opts)
# OSAPI converts status to vm_state
self.assertIn('vm_state', search_opts)
# Allowed only by admins with admin API on
self.assertIn('ip', search_opts)
self.assertNotIn('unknown_option', search_opts)
# "hidden" is ignored as a filter parameter since it is only used
# internally
self.assertNotIn('hidden', search_opts)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
rules = {
"os_compute_api:servers:index": "project_id:%s" % self.project_id,
"os_compute_api:servers:allow_all_filters":
"project_id:%s" % self.project_id,
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.mock_get_all.side_effect = fake_get_all
query_str = ("name=foo&ip=10.*&status=active&unknown_option=meow&"
"terminated_at=^2016-02-01.*&hidden=true")
req = self.req(self.path_with_query % query_str)
servers = self.controller.index(req)['servers']
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
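    def test_policy_rule_parsing_sketch(self):
        # Illustrative sketch: oslo.policy parses check strings of the
        # form "attribute:value"; the override above passes because the
        # request context carries the matching project_id. Only
        # Rules.from_dict, already used above, is exercised here.
        rules = oslo_policy.Rules.from_dict(
            {'demo_rule': 'project_id:%s' % self.project_id})
        self.assertIn('demo_rule', rules)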
def test_get_servers_allows_ip(self):
"""Test getting servers by ip."""
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('ip', search_opts)
self.assertEqual(search_opts['ip'], r'10\..*')
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % r'ip=10\..*')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_admin_allows_ip6(self):
"""Test getting servers by ip6 with admin_api enabled and
admin context
"""
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('ip6', search_opts)
self.assertEqual(search_opts['ip6'], 'ffff.*')
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'ip6=ffff.*',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_allows_ip6_with_new_version(self):
"""Test getting servers by ip6 with new version requested
and no admin context
"""
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('ip6', search_opts)
self.assertEqual(search_opts['ip6'], 'ffff.*')
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'ip6=ffff.*')
req.api_version_request = api_version_request.APIVersionRequest('2.5')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_admin_allows_access_ip_v4(self):
"""Test getting servers by access_ip_v4 with admin_api enabled and
admin context
"""
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('access_ip_v4', search_opts)
self.assertEqual(search_opts['access_ip_v4'], 'ffff.*')
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'access_ip_v4=ffff.*',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_admin_allows_access_ip_v6(self):
"""Test getting servers by access_ip_v6 with admin_api enabled and
admin context
"""
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('access_ip_v6', search_opts)
self.assertEqual(search_opts['access_ip_v6'], 'ffff.*')
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'access_ip_v6=ffff.*',
use_admin_context=True)
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def _assertServerUsage(self, server, launched_at, terminated_at):
resp_launched_at = timeutils.parse_isotime(
server.get('OS-SRV-USG:launched_at'))
self.assertEqual(timeutils.normalize_time(resp_launched_at),
launched_at)
resp_terminated_at = timeutils.parse_isotime(
server.get('OS-SRV-USG:terminated_at'))
self.assertEqual(timeutils.normalize_time(resp_terminated_at),
terminated_at)
def test_show_server_usage(self):
DATE1 = datetime.datetime(year=2013, month=4, day=5, hour=12)
DATE2 = datetime.datetime(year=2013, month=4, day=5, hour=13)
self.mock_get.side_effect = fakes.fake_compute_get(
id=1, uuid=FAKE_UUID, launched_at=DATE1, terminated_at=DATE2)
req = self.req(self.path_with_id % FAKE_UUID)
req.accept = 'application/json'
req.method = 'GET'
res = req.get_response(compute.APIRouterV21())
self.assertEqual(res.status_int, 200)
self.useFixture(utils_fixture.TimeFixture())
self._assertServerUsage(jsonutils.loads(res.body).get('server'),
launched_at=DATE1,
terminated_at=DATE2)
def test_detail_server_usage(self):
DATE1 = datetime.datetime(year=2013, month=4, day=5, hour=12)
DATE2 = datetime.datetime(year=2013, month=4, day=5, hour=13)
DATE3 = datetime.datetime(year=2013, month=4, day=5, hour=14)
def fake_compute_get_all(*args, **kwargs):
db_list = [
fakes.stub_instance_obj(context, id=2, uuid=FAKE_UUID,
launched_at=DATE2,
terminated_at=DATE3),
fakes.stub_instance_obj(context, id=3, uuid=FAKE_UUID,
launched_at=DATE1,
terminated_at=DATE3),
]
return objects.InstanceList(objects=db_list)
self.mock_get_all.side_effect = fake_compute_get_all
req = self.req(self.path_detail)
req.accept = 'application/json'
servers = req.get_response(compute.APIRouterV21())
self.assertEqual(servers.status_int, 200)
self._assertServerUsage(jsonutils.loads(
servers.body).get('servers')[0],
launched_at=DATE2,
terminated_at=DATE3)
self._assertServerUsage(jsonutils.loads(
servers.body).get('servers')[1],
launched_at=DATE1,
terminated_at=DATE3)
def test_get_all_server_details(self):
expected_flavor = {
"id": "2",
"links": [
{
"rel": "bookmark",
"href": ('http://localhost/%s/flavors/2' %
self.project_id),
},
],
}
expected_image = {
"id": "10",
"links": [
{
"rel": "bookmark",
"href": ('http://localhost/%s/images/10' %
self.project_id),
},
],
}
req = self.req(self.path_detail)
res_dict = self.controller.detail(req)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['hostId'], '')
self.assertEqual(s['name'], 'server%d' % (i + 1))
self.assertEqual(s['image'], expected_image)
self.assertEqual(s['flavor'], expected_flavor)
self.assertEqual(s['status'], 'ACTIVE')
self.assertEqual(s['metadata']['seq'], str(i + 1))
def test_get_all_server_details_with_host(self):
"""We want to make sure that if two instances are on the same host,
then they return the same hostId. If two instances are on different
hosts, they should return different hostIds. In this test,
there are 5 instances - 2 on one host and 3 on another.
"""
def return_servers_with_host(*args, **kwargs):
return objects.InstanceList(
objects=[fakes.stub_instance_obj(None,
id=i + 1,
user_id='fake',
project_id='fake',
host=i % 2,
uuid=fakes.get_fake_uuid(i))
for i in range(5)])
self.mock_get_all.side_effect = return_servers_with_host
req = self.req(self.path_detail)
res_dict = self.controller.detail(req)
server_list = res_dict['servers']
host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
self.assertTrue(host_ids[0] and host_ids[1])
self.assertNotEqual(host_ids[0], host_ids[1])
for i, s in enumerate(server_list):
self.assertEqual(s['id'], fakes.get_fake_uuid(i))
self.assertEqual(s['hostId'], host_ids[i % 2])
self.assertEqual(s['name'], 'server%d' % (i + 1))
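    def test_host_id_hashing_sketch(self):
        # Illustrative sketch: hostId is assumed to be a deterministic
        # hash of (host, project_id) via nova.utils.generate_hostid,
        # which is used elsewhere in this file, so equal inputs must
        # collide and different hosts must not.
        hid_a = nova_utils.generate_hostid('host-a', self.project_id)
        hid_b = nova_utils.generate_hostid('host-b', self.project_id)
        self.assertEqual(hid_a, nova_utils.generate_hostid(
            'host-a', self.project_id))
        self.assertNotEqual(hid_a, hid_b)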
def test_get_servers_joins_services(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
cur = api_version_request.APIVersionRequest(self.wsgi_api_version)
v216 = api_version_request.APIVersionRequest('2.16')
if cur >= v216:
self.assertIn('services', expected_attrs)
else:
self.assertNotIn('services', expected_attrs)
return objects.InstanceList()
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_detail, use_admin_context=True)
self.assertIn('servers', self.controller.detail(req))
req = fakes.HTTPRequest.blank(self.path_detail,
use_admin_context=True,
version=self.wsgi_api_version)
self.assertIn('servers', self.controller.detail(req))
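    def test_api_version_ordering_sketch(self):
        # Illustrative sketch: APIVersionRequest objects support rich
        # comparisons, which is what the expected_attrs branching in
        # test_get_servers_joins_services above relies on.
        v205 = api_version_request.APIVersionRequest('2.5')
        v216 = api_version_request.APIVersionRequest('2.16')
        self.assertTrue(v216 > v205)
        self.assertTrue(v205 < v216)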
class ServersControllerTestV23(ServersControllerTest):
wsgi_api_version = '2.3'
def setUp(self):
super(ServersControllerTestV23, self).setUp()
self.mock_get.side_effect = fakes.fake_compute_get(
id=2, uuid=FAKE_UUID,
node="node-fake",
reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2,
display_name="server2",
root_device_name="/dev/vda",
user_data="userdata",
metadata={"seq": "2"},
availability_zone='nova',
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100):
server_dict = super(ServersControllerTestV23,
self)._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
status,
progress)
server_dict['server']["OS-EXT-SRV-ATTR:hostname"] = "server2"
server_dict['server'][
"OS-EXT-SRV-ATTR:hypervisor_hostname"] = "node-fake"
server_dict['server']["OS-EXT-SRV-ATTR:kernel_id"] = UUID1
server_dict['server']["OS-EXT-SRV-ATTR:launch_index"] = 0
server_dict['server']["OS-EXT-SRV-ATTR:ramdisk_id"] = UUID2
server_dict['server']["OS-EXT-SRV-ATTR:reservation_id"] = "r-1"
server_dict['server']["OS-EXT-SRV-ATTR:root_device_name"] = "/dev/vda"
server_dict['server']["OS-EXT-SRV-ATTR:user_data"] = "userdata"
server_dict['server']["OS-EXT-STS:task_state"] = None
server_dict['server']["OS-EXT-STS:vm_state"] = vm_states.ACTIVE
server_dict['server']["OS-EXT-STS:power_state"] = 1
server_dict['server']["os-extended-volumes:volumes_attached"] = [
{'id': 'some_volume_1', 'delete_on_termination': True},
{'id': 'some_volume_2', 'delete_on_termination': False}]
return server_dict
def test_show(self):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
req = self.req(self.path_with_id % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_detail(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None):
obj_list = []
for i in range(2):
server = fakes.stub_instance_obj(context,
id=2, uuid=FAKE_UUID,
node="node-fake",
reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2,
display_name="server2",
root_device_name="/dev/vda",
user_data="userdata",
metadata={"seq": "2"},
availability_zone='nova',
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
obj_list.append(server)
return objects.InstanceList(objects=obj_list)
self.mock_get_all.side_effect = None
self.mock_get_all.return_value = fake_get_all(context)
req = self.req(self.path_detail)
servers_list = self.controller.detail(req)
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0)
self.assertIn(expected_server['server'], servers_list['servers'])
class ServersControllerTestV29(ServersControllerTest):
wsgi_api_version = '2.9'
def setUp(self):
super(ServersControllerTestV29, self).setUp()
self.mock_get.side_effect = fakes.fake_compute_get(
id=2, uuid=FAKE_UUID,
node="node-fake",
reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2,
display_name="server2",
root_device_name="/dev/vda",
user_data="userdata",
metadata={"seq": "2"},
availability_zone='nova',
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100):
server_dict = super(ServersControllerTestV29,
self)._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
status,
progress)
server_dict['server']['locked'] = False
server_dict['server']["OS-EXT-SRV-ATTR:hostname"] = "server2"
server_dict['server'][
"OS-EXT-SRV-ATTR:hypervisor_hostname"] = "node-fake"
server_dict['server']["OS-EXT-SRV-ATTR:kernel_id"] = UUID1
server_dict['server']["OS-EXT-SRV-ATTR:launch_index"] = 0
server_dict['server']["OS-EXT-SRV-ATTR:ramdisk_id"] = UUID2
server_dict['server']["OS-EXT-SRV-ATTR:reservation_id"] = "r-1"
server_dict['server']["OS-EXT-SRV-ATTR:root_device_name"] = "/dev/vda"
server_dict['server']["OS-EXT-SRV-ATTR:user_data"] = "userdata"
server_dict['server']["OS-EXT-STS:task_state"] = None
server_dict['server']["OS-EXT-STS:vm_state"] = vm_states.ACTIVE
server_dict['server']["OS-EXT-STS:power_state"] = 1
server_dict['server']["os-extended-volumes:volumes_attached"] = [
{'id': 'some_volume_1', 'delete_on_termination': True},
{'id': 'some_volume_2', 'delete_on_termination': False}]
return server_dict
def _test_get_server_with_lock(self, locked_by):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
self.mock_get.side_effect = fakes.fake_compute_get(
id=2, locked_by=locked_by, uuid=FAKE_UUID,
node="node-fake",
reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2,
display_name="server2",
root_device_name="/dev/vda",
user_data="userdata",
metadata={"seq": "2"},
availability_zone='nova',
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
req = self.req(self.path_with_id % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0)
        expected_server['server']['locked'] = bool(locked_by)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
return res_dict
def test_get_server_with_locked_by_admin(self):
res_dict = self._test_get_server_with_lock('admin')
self.assertTrue(res_dict['server']['locked'])
def test_get_server_with_locked_by_owner(self):
res_dict = self._test_get_server_with_lock('owner')
self.assertTrue(res_dict['server']['locked'])
def test_get_server_not_locked(self):
res_dict = self._test_get_server_with_lock(None)
self.assertFalse(res_dict['server']['locked'])
def _test_list_server_detail_with_lock(self,
s1_locked,
s2_locked):
self.mock_get_all.side_effect = None
self.mock_get_all.return_value = fake_instance_get_all_with_locked(
context, [s1_locked, s2_locked],
node="node-fake",
reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2,
display_name="server2",
root_device_name="/dev/vda",
user_data="userdata",
metadata={"seq": "2"},
availability_zone='nova',
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
req = self.req(self.path_detail)
servers_list = self.controller.detail(req)
        # Check that each returned server has the same 'locked' value
        # and 'id' as it was created with.
for locked in [s1_locked, s2_locked]:
server = next(server for server in servers_list['servers']
if (server['id'] == fakes.get_fake_uuid(locked)))
            expected = locked != 'not_locked'
self.assertEqual(expected, server['locked'])
def test_list_server_detail_with_locked_s1_admin_s2_owner(self):
self._test_list_server_detail_with_lock('admin', 'owner')
def test_list_server_detail_with_locked_s1_owner_s2_admin(self):
self._test_list_server_detail_with_lock('owner', 'admin')
def test_list_server_detail_with_locked_s1_admin_s2_admin(self):
self._test_list_server_detail_with_lock('admin', 'admin')
def test_list_server_detail_with_locked_s1_admin_s2_not_locked(self):
self._test_list_server_detail_with_lock('admin', 'not_locked')
def test_list_server_detail_with_locked_s1_s2_not_locked(self):
self._test_list_server_detail_with_lock('not_locked',
'not_locked')
def test_get_servers_remove_non_search_options(self):
self.mock_get_all.side_effect = None
req = fakes.HTTPRequestV21.blank('/servers'
'?sort_key=uuid&sort_dir=asc'
'&sort_key=user_id&sort_dir=desc'
'&limit=1&marker=123',
use_admin_context=True)
self.controller.index(req)
kwargs = self.mock_get_all.call_args[1]
search_opts = kwargs['search_opts']
for key in ('sort_key', 'sort_dir', 'limit', 'marker'):
self.assertNotIn(key, search_opts)
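    def test_non_search_option_stripping_sketch(self):
        # Illustrative sketch of the behaviour verified above: paging
        # and sort parameters are API-layer concerns, so a filter dict
        # built from the query string must exclude them, e.g.:
        query = {'sort_key': 'uuid', 'sort_dir': 'asc',
                 'limit': '1', 'marker': '123', 'name': 'foo'}
        non_filters = ('sort_key', 'sort_dir', 'limit', 'marker')
        search_opts = {k: v for k, v in query.items()
                       if k not in non_filters}
        self.assertEqual({'name': 'foo'}, search_opts)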
class ServersControllerTestV216(ServersControllerTest):
wsgi_api_version = '2.16'
def setUp(self):
super(ServersControllerTestV216, self).setUp()
self.mock_get.side_effect = fakes.fake_compute_get(
id=2, uuid=FAKE_UUID,
host="node-fake",
node="node-fake",
reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2,
display_name="server2",
root_device_name="/dev/vda",
user_data="userdata",
metadata={"seq": "2"},
availability_zone='nova',
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
self.mock_get_instance_host_status = self.useFixture(
fixtures.MockPatchObject(
compute_api.API, 'get_instance_host_status',
return_value='UP')).mock
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100):
server_dict = super(ServersControllerTestV216,
self)._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
status,
progress)
server_dict['server']['locked'] = False
server_dict['server']["host_status"] = "UP"
server_dict['server']["OS-EXT-SRV-ATTR:hostname"] = "server2"
server_dict['server']['hostId'] = nova_utils.generate_hostid(
'node-fake', server_dict['server']['tenant_id'])
server_dict['server']["OS-EXT-SRV-ATTR:host"] = "node-fake"
server_dict['server'][
"OS-EXT-SRV-ATTR:hypervisor_hostname"] = "node-fake"
server_dict['server']["OS-EXT-SRV-ATTR:kernel_id"] = UUID1
server_dict['server']["OS-EXT-SRV-ATTR:launch_index"] = 0
server_dict['server']["OS-EXT-SRV-ATTR:ramdisk_id"] = UUID2
server_dict['server']["OS-EXT-SRV-ATTR:reservation_id"] = "r-1"
server_dict['server']["OS-EXT-SRV-ATTR:root_device_name"] = "/dev/vda"
server_dict['server']["OS-EXT-SRV-ATTR:user_data"] = "userdata"
server_dict['server']["OS-EXT-STS:task_state"] = None
server_dict['server']["OS-EXT-STS:vm_state"] = vm_states.ACTIVE
server_dict['server']["OS-EXT-STS:power_state"] = 1
server_dict['server']["os-extended-volumes:volumes_attached"] = [
{'id': 'some_volume_1', 'delete_on_termination': True},
{'id': 'some_volume_2', 'delete_on_termination': False}]
return server_dict
@mock.patch('nova.compute.api.API.get_instance_host_status')
def _verify_host_status_policy_behavior(self, func, mock_get_host_status):
# Set policy to disallow both host_status cases and verify we don't
# call the get_instance_host_status compute RPC API.
rules = {
'os_compute_api:servers:show:host_status': '!',
'os_compute_api:servers:show:host_status:unknown-only': '!',
}
orig_rules = policy.get_rules()
policy.set_rules(oslo_policy.Rules.from_dict(rules), overwrite=False)
func()
mock_get_host_status.assert_not_called()
# Restore the original rules.
policy.set_rules(orig_rules)
def test_show(self):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
req = self.req(self.path_with_id % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
func = functools.partial(self.controller.show, req, FAKE_UUID)
self._verify_host_status_policy_behavior(func)
def test_detail(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None):
obj_list = []
for i in range(2):
server = fakes.stub_instance_obj(context,
id=2, uuid=FAKE_UUID,
host="node-fake",
node="node-fake",
reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2,
display_name="server2",
root_device_name="/dev/vda",
user_data="userdata",
metadata={"seq": "2"},
availability_zone='nova',
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
obj_list.append(server)
return objects.InstanceList(objects=obj_list)
self.mock_get_all.side_effect = None
self.mock_get_all.return_value = fake_get_all(context)
req = self.req(self.path_detail)
servers_list = self.controller.detail(req)
self.assertEqual(2, len(servers_list['servers']))
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0)
self.assertIn(expected_server['server'], servers_list['servers'])
# We should have only gotten the host status once per host (and the
# 2 servers in the response are using the same host).
self.mock_get_instance_host_status.assert_called_once()
func = functools.partial(self.controller.detail, req)
self._verify_host_status_policy_behavior(func)
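    def test_host_status_memoisation_sketch(self):
        # Illustrative sketch with a hypothetical helper, not nova's
        # actual view builder: fetch host status once per host, as
        # assert_called_once() above verifies for two servers that
        # share a host.
        rpc_calls = []
        cache = {}
        def host_status(host):
            if host not in cache:
                rpc_calls.append(host)
                cache[host] = 'UP'
            return cache[host]
        self.assertEqual(['UP', 'UP'],
                         [host_status('node-fake') for _ in range(2)])
        self.assertEqual(['node-fake'], rpc_calls)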
class ServersControllerTestV219(ServersControllerTest):
wsgi_api_version = '2.19'
def setUp(self):
super(ServersControllerTestV219, self).setUp()
self.mock_get.side_effect = fakes.fake_compute_get(
id=2, uuid=FAKE_UUID,
node="node-fake",
reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2,
display_name="server2",
root_device_name="/dev/vda",
user_data="userdata",
metadata={"seq": "2"},
availability_zone='nova',
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
        self.useFixture(fixtures.MockPatchObject(
            compute_api.API, 'get_instance_host_status',
            return_value='UP'))
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100, description=None):
server_dict = super(ServersControllerTestV219,
self)._get_server_data_dict(uuid,
image_bookmark,
flavor_bookmark,
status,
progress)
server_dict['server']['locked'] = False
server_dict['server']['description'] = description
server_dict['server']["host_status"] = "UP"
server_dict['server']["OS-EXT-SRV-ATTR:hostname"] = "server2"
server_dict['server'][
"OS-EXT-SRV-ATTR:hypervisor_hostname"] = "node-fake"
server_dict['server']["OS-EXT-SRV-ATTR:kernel_id"] = UUID1
server_dict['server']["OS-EXT-SRV-ATTR:launch_index"] = 0
server_dict['server']["OS-EXT-SRV-ATTR:ramdisk_id"] = UUID2
server_dict['server']["OS-EXT-SRV-ATTR:reservation_id"] = "r-1"
server_dict['server']["OS-EXT-SRV-ATTR:root_device_name"] = "/dev/vda"
server_dict['server']["OS-EXT-SRV-ATTR:user_data"] = "userdata"
server_dict['server']["OS-EXT-STS:task_state"] = None
server_dict['server']["OS-EXT-STS:vm_state"] = vm_states.ACTIVE
server_dict['server']["OS-EXT-STS:power_state"] = 1
server_dict['server']["os-extended-volumes:volumes_attached"] = [
{'id': 'some_volume_1', 'delete_on_termination': True},
{'id': 'some_volume_2', 'delete_on_termination': False}]
return server_dict
def _test_get_server_with_description(self, description):
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
self.mock_get.side_effect = fakes.fake_compute_get(
id=2, display_description=description, uuid=FAKE_UUID,
node="node-fake",
reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2,
display_name="server2",
root_device_name="/dev/vda",
user_data="userdata",
metadata={"seq": "2"},
availability_zone='nova',
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
req = self.req(self.path_with_id % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
expected_server = self._get_server_data_dict(FAKE_UUID,
image_bookmark,
flavor_bookmark,
progress=0,
description=description)
self.assertThat(res_dict, matchers.DictMatches(expected_server))
return res_dict
def _test_list_server_detail_with_descriptions(self,
s1_desc,
s2_desc):
self.mock_get_all.side_effect = None
self.mock_get_all.return_value = (
fake_instance_get_all_with_description(context,
[s1_desc, s2_desc],
launched_at=None,
terminated_at=None))
req = self.req(self.path_detail)
servers_list = self.controller.detail(req)
        # Check that each returned server has the same 'description'
        # value and 'id' as it was created with.
for desc in [s1_desc, s2_desc]:
server = next(server for server in servers_list['servers']
if (server['id'] == fakes.get_fake_uuid(desc)))
expected = desc
self.assertEqual(expected, server['description'])
def test_get_server_with_description(self):
self._test_get_server_with_description('test desc')
def test_list_server_detail_with_descriptions(self):
self._test_list_server_detail_with_descriptions('desc1', 'desc2')
class ServersControllerTestV226(ControllerTest):
wsgi_api_version = '2.26'
def test_get_server_with_tags_by_id(self):
req = fakes.HTTPRequest.blank(self.path_with_id % FAKE_UUID,
version=self.wsgi_api_version)
ctxt = req.environ['nova.context']
tags = ['tag1', 'tag2']
def fake_get(*args, **kwargs):
self.assertIn('tags', kwargs['expected_attrs'])
fake_server = fakes.stub_instance_obj(
ctxt, id=2, vm_state=vm_states.ACTIVE, progress=100)
tag_list = objects.TagList(objects=[
objects.Tag(resource_id=FAKE_UUID, tag=tag)
for tag in tags])
fake_server.tags = tag_list
return fake_server
self.mock_get.side_effect = fake_get
res_dict = self.controller.show(req, FAKE_UUID)
self.assertIn('tags', res_dict['server'])
self.assertEqual(tags, res_dict['server']['tags'])
def _test_get_servers_allows_tag_filters(self, filter_name):
query_string = '%s=t1,t2' % filter_name
req = fakes.HTTPRequest.blank(self.path_with_query % query_string,
version=self.wsgi_api_version)
def fake_get_all(*a, **kw):
self.assertIsNotNone(kw['search_opts'])
self.assertIn(filter_name, kw['search_opts'])
self.assertEqual(kw['search_opts'][filter_name], ['t1', 't2'])
return objects.InstanceList(
objects=[fakes.stub_instance_obj(req.environ['nova.context'],
uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_allows_tags_filter(self):
self._test_get_servers_allows_tag_filters('tags')
def test_get_servers_allows_tags_any_filter(self):
self._test_get_servers_allows_tag_filters('tags-any')
def test_get_servers_allows_not_tags_filter(self):
self._test_get_servers_allows_tag_filters('not-tags')
def test_get_servers_allows_not_tags_any_filter(self):
self._test_get_servers_allows_tag_filters('not-tags-any')
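    def test_tag_filter_splitting_sketch(self):
        # Illustrative sketch: a comma-separated tag query value is
        # assumed to reach compute_api.get_all() as a list, i.e.:
        self.assertEqual(['t1', 't2'], 't1,t2'.split(','))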
class ServerControllerTestV238(ControllerTest):
wsgi_api_version = '2.38'
def _test_invalid_status(self, is_admin):
req = fakes.HTTPRequest.blank(
self.path_detail_with_query % 'status=invalid',
version=self.wsgi_api_version, use_admin_context=is_admin)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.detail, req)
def test_list_servers_detail_invalid_status_for_admin(self):
self._test_invalid_status(True)
def test_list_servers_detail_invalid_status_for_non_admin(self):
self._test_invalid_status(False)
class ServerControllerTestV247(ControllerTest):
"""Server controller test for microversion 2.47
    The intent here is simply to verify that, when showing server details
    after microversion 2.47, the flavor is shown as a dict of flavor
    information rather than as a dict of id/links. The existence of the
    'extra_specs' key is controlled by policy.
"""
wsgi_api_version = '2.47'
@mock.patch.object(objects.TagList, 'get_by_resource_id')
def test_get_all_server_details(self, mock_get_by_resource_id):
# Fake out tags on the instances
mock_get_by_resource_id.return_value = objects.TagList()
expected_flavor = {
'disk': 20,
'ephemeral': 0,
'extra_specs': {},
'original_name': u'm1.small',
'ram': 2048,
'swap': 0,
'vcpus': 1}
req = fakes.HTTPRequest.blank(self.path_detail,
version=self.wsgi_api_version)
hits = []
real_auth = policy.authorize
# Wrapper for authorize to count the number of times
# we authorize for extra-specs
def fake_auth(context, action, target):
if 'extra-specs' in action:
hits.append(1)
return real_auth(context, action, target)
with mock.patch('nova.policy.authorize') as mock_auth:
mock_auth.side_effect = fake_auth
res_dict = self.controller.detail(req)
        # We should have found more than one server, but hit the
        # policy check only once.
self.assertGreater(len(res_dict['servers']), 1)
self.assertEqual(1, len(hits))
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['flavor'], expected_flavor)
@mock.patch.object(objects.TagList, 'get_by_resource_id')
def test_get_all_server_details_no_extra_spec(self,
mock_get_by_resource_id):
# Fake out tags on the instances
mock_get_by_resource_id.return_value = objects.TagList()
# Set the policy so we don't have permission to index
# flavor extra-specs but are able to get server details.
servers_rule = 'os_compute_api:servers:detail'
extraspec_rule = 'os_compute_api:os-flavor-extra-specs:index'
self.policy.set_rules({
extraspec_rule: 'rule:admin_api',
servers_rule: '@'})
expected_flavor = {
'disk': 20,
'ephemeral': 0,
'original_name': u'm1.small',
'ram': 2048,
'swap': 0,
'vcpus': 1}
req = fakes.HTTPRequest.blank(self.path_detail,
version=self.wsgi_api_version)
res_dict = self.controller.detail(req)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['flavor'], expected_flavor)
class ServerControllerTestV266(ControllerTest):
"""Server controller test for microversion 2.66
    Microversion 2.66 adds the changes-before parameter to the list
    servers and list server details APIs. It filters the response by the
    date and time stamp at which the server last changed; only servers
    changed before the specified time stamp are returned.
"""
wsgi_api_version = '2.66'
def req(self, url, use_admin_context=False):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context,
version=self.wsgi_api_version)
def test_get_servers_allows_changes_before(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('changes-before', search_opts)
changes_before = datetime.datetime(2011, 1, 24, 17, 8, 1,
tzinfo=iso8601.iso8601.UTC)
self.assertEqual(search_opts['changes-before'], changes_before)
self.assertNotIn('deleted', search_opts)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
params = 'changes-before=2011-01-24T17:08:01Z'
req = self.req(self.path_with_query % params)
req.api_version_request = api_version_request.APIVersionRequest('2.66')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_allows_changes_before_bad_value(self):
params = 'changes-before=asdf'
req = self.req(self.path_with_query % params)
req.api_version_request = api_version_request.APIVersionRequest('2.66')
self.assertRaises(exception.ValidationError, self.controller.index,
req)
def test_get_servers_allows_changes_before_bad_value_on_compat_mode(self):
params = 'changes-before=asdf'
req = self.req(self.path_with_query % params)
req.api_version_request = api_version_request.APIVersionRequest('2.66')
req.set_legacy_v2()
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
def test_get_servers_allows_changes_since_and_changes_before(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
self.assertIn('changes-since', search_opts)
changes_since = datetime.datetime(2011, 1, 23, 17, 8, 1,
tzinfo=iso8601.iso8601.UTC)
self.assertIn('changes-before', search_opts)
changes_before = datetime.datetime(2011, 1, 24, 17, 8, 1,
tzinfo=iso8601.iso8601.UTC)
self.assertEqual(search_opts['changes-since'], changes_since)
self.assertEqual(search_opts['changes-before'], changes_before)
self.assertNotIn('deleted', search_opts)
return objects.InstanceList(
objects=[fakes.stub_instance_obj(100, uuid=uuids.fake)])
self.mock_get_all.side_effect = fake_get_all
        params = ('changes-since=2011-01-23T17:08:01Z&'
                  'changes-before=2011-01-24T17:08:01Z')
req = self.req(self.path_with_query % params)
req.api_version_request = api_version_request.APIVersionRequest('2.66')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
def test_get_servers_filters_with_distinct_changes_time_bad_request(self):
changes_since = '2018-09-04T05:45:27Z'
changes_before = '2018-09-03T05:45:27Z'
query_string = ('changes-since=%s&changes-before=%s' %
(changes_since, changes_before))
req = self.req(self.path_with_query % query_string)
req.api_version_request = api_version_request.APIVersionRequest('2.66')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
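    def test_changes_filter_timestamp_parsing_sketch(self):
        # Illustrative sketch: the Zulu-suffixed values above are
        # assumed to be parsed with oslo.utils into timezone-aware
        # datetimes, matching the iso8601 UTC datetimes the fakes
        # compare against.
        parsed = timeutils.parse_isotime('2011-01-24T17:08:01Z')
        self.assertEqual(
            datetime.datetime(2011, 1, 24, 17, 8, 1,
                              tzinfo=iso8601.iso8601.UTC),
            parsed)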
class ServersControllerTestV271(ControllerTest):
wsgi_api_version = '2.71'
def req(self, url, use_admin_context=False):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context,
version=self.wsgi_api_version)
def test_show_server_group_not_exist(self):
req = self.req(self.path_with_id % FAKE_UUID)
servers = self.controller.show(req, FAKE_UUID)
expect_sg = []
self.assertEqual(expect_sg, servers['server']['server_groups'])
class ServersControllerTestV273(ControllerTest):
"""Server Controller test for microversion 2.73
The intent here is simply to verify that when showing server details
after microversion 2.73 the response will also have the locked_reason
key for the servers.
"""
wsgi_api_version = '2.73'
def req(self, url, use_admin_context=False):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context,
version=self.wsgi_api_version)
def test_get_servers_with_locked_filter(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
db_list = [fakes.stub_instance(
100, uuid=uuids.fake, locked_by='fake')]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'locked=true')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
search = {'deleted': False, 'project_id': self.project_id,
'locked': True}
self.mock_get_all.assert_called_once_with(
req.environ['nova.context'], expected_attrs=[],
limit=1000, marker=None,
search_opts=search,
sort_dirs=['desc'], sort_keys=['created_at'],
cell_down_support=False, all_tenants=False)
def test_get_servers_with_locked_filter_invalid_value(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
db_list = [fakes.stub_instance(
100, uuid=uuids.fake, locked_by='fake')]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'locked=price')
exp = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
self.assertIn("Unrecognized value 'price'", six.text_type(exp))
def test_get_servers_with_locked_filter_empty_value(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
db_list = [fakes.stub_instance(
100, uuid=uuids.fake, locked_by='fake')]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query % 'locked=')
exp = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
self.assertIn("Unrecognized value ''", six.text_type(exp))
def test_get_servers_with_locked_sort_key(self):
def fake_get_all(context, search_opts=None,
limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
db_list = [fakes.stub_instance(
100, uuid=uuids.fake, locked_by='fake')]
return instance_obj._make_instance_list(
context, objects.InstanceList(), db_list, FIELDS)
self.mock_get_all.side_effect = fake_get_all
req = self.req(self.path_with_query %
'sort_dir=desc&sort_key=locked')
servers = self.controller.index(req)['servers']
self.assertEqual(1, len(servers))
self.assertEqual(uuids.fake, servers[0]['id'])
self.mock_get_all.assert_called_once_with(
req.environ['nova.context'], expected_attrs=[],
limit=1000, marker=None,
search_opts={'deleted': False, 'project_id': self.project_id},
sort_dirs=['desc'], sort_keys=['locked'],
cell_down_support=False, all_tenants=False)
class ServersControllerTestV275(ControllerTest):
wsgi_api_version = '2.75'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
@mock.patch('nova.compute.api.API.get_all')
def test_get_servers_additional_query_param_old_version(self, mock_get):
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version='2.74')
self.controller.index(req)
@mock.patch('nova.compute.api.API.get_all')
def test_get_servers_ignore_sort_key_old_version(self, mock_get):
req = fakes.HTTPRequest.blank(
self.path_with_query % 'sort_key=deleted',
use_admin_context=True, version='2.74')
self.controller.index(req)
def test_get_servers_additional_query_param(self):
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version=self.wsgi_api_version)
self.assertRaises(exception.ValidationError, self.controller.index,
req)
def test_get_servers_previously_ignored_sort_key(self):
for s_ignore in servers_schema.SERVER_LIST_IGNORE_SORT_KEY_V273:
req = fakes.HTTPRequest.blank(
self.path_with_query % 'sort_key=%s' % s_ignore,
use_admin_context=True,
version=self.wsgi_api_version)
self.assertRaises(exception.ValidationError, self.controller.index,
req)
def test_get_servers_additional_sort_key(self):
req = fakes.HTTPRequest.blank(
self.path_with_query % 'sort_key=unknown',
use_admin_context=True, version=self.wsgi_api_version)
self.assertRaises(exception.ValidationError, self.controller.index,
req)
def test_update_response_no_show_server_only_attributes_old_version(self):
        # There are some old server attributes which were added only for
        # the GET server APIs, not for PUT. GET server and PUT server
        # share the same view builder method SHOW() to build the response,
        # so make sure the attributes which are not supposed to be
        # included in the PUT response are not present.
body = {'server': {'name': 'server_test'}}
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version='2.74')
res_dict = self.controller.update(req, FAKE_UUID, body=body)
for field in GET_ONLY_FIELDS:
self.assertNotIn(field, res_dict['server'])
for items in res_dict['server']['addresses'].values():
for item in items:
self.assertNotIn('OS-EXT-IPS:type', item)
self.assertNotIn('OS-EXT-IPS-MAC:mac_addr', item)
def test_update_response_has_show_server_all_attributes(self):
body = {'server': {'name': 'server_test'}}
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version=self.wsgi_api_version)
res_dict = self.controller.update(req, FAKE_UUID, body=body)
for field in GET_ONLY_FIELDS:
self.assertIn(field, res_dict['server'])
for items in res_dict['server']['addresses'].values():
for item in items:
self.assertIn('OS-EXT-IPS:type', item)
self.assertIn('OS-EXT-IPS-MAC:mac_addr', item)
def test_rebuild_response_no_show_server_only_attributes_old_version(self):
        # There are some old server attributes which were added only for
        # the GET server APIs, not for rebuild. GET server and rebuild
        # share the same view builder method SHOW() to build the response,
        # so make sure the attributes which are not supposed to be
        # included in the rebuild response are not present.
body = {'rebuild': {"imageRef": self.image_uuid}}
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version='2.74')
fake_get = fakes.fake_compute_get(
vm_state=vm_states.ACTIVE,
project_id=req.environ['nova.context'].project_id,
user_id=req.environ['nova.context'].user_id)
self.mock_get.side_effect = fake_get
res_dict = self.controller._action_rebuild(req, FAKE_UUID,
body=body).obj
        get_only_fields_rebuild = copy.deepcopy(GET_ONLY_FIELDS)
        get_only_fields_rebuild.remove('key_name')
        for field in get_only_fields_rebuild:
self.assertNotIn(field, res_dict['server'])
for items in res_dict['server']['addresses'].values():
for item in items:
self.assertNotIn('OS-EXT-IPS:type', item)
self.assertNotIn('OS-EXT-IPS-MAC:mac_addr', item)
def test_rebuild_response_has_show_server_all_attributes(self):
body = {'rebuild': {"imageRef": self.image_uuid}}
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version=self.wsgi_api_version)
fake_get = fakes.fake_compute_get(
vm_state=vm_states.ACTIVE,
project_id=req.environ['nova.context'].project_id,
user_id=req.environ['nova.context'].user_id)
self.mock_get.side_effect = fake_get
res_dict = self.controller._action_rebuild(req, FAKE_UUID,
body=body).obj
        for field in GET_ONLY_FIELDS:
            if field == 'OS-EXT-SRV-ATTR:user_data':
                # The rebuild response exposes user_data under its bare
                # name rather than the GET-only prefixed attribute.
                self.assertNotIn(field, res_dict['server'])
                field = 'user_data'
            self.assertIn(field, res_dict['server'])
for items in res_dict['server']['addresses'].values():
for item in items:
self.assertIn('OS-EXT-IPS:type', item)
self.assertIn('OS-EXT-IPS-MAC:mac_addr', item)
class ServersControllerDeleteTest(ControllerTest):
def setUp(self):
super(ServersControllerDeleteTest, self).setUp()
self.server_delete_called = False
def fake_delete(api, context, instance):
if instance.uuid == uuids.non_existent_uuid:
raise exception.InstanceNotFound(instance_id=instance.uuid)
self.server_delete_called = True
self.stub_out('nova.compute.api.API.delete', fake_delete)
def _create_delete_request(self, uuid):
fakes.stub_out_instance_quota(self, 0, 10)
req = fakes.HTTPRequestV21.blank(self.path_with_id % uuid)
req.method = 'DELETE'
fake_get = fakes.fake_compute_get(
uuid=uuid,
vm_state=vm_states.ACTIVE,
project_id=req.environ['nova.context'].project_id,
user_id=req.environ['nova.context'].user_id)
self.mock_get.side_effect = fake_get
return req
def _delete_server_instance(self, uuid=FAKE_UUID):
req = self._create_delete_request(uuid)
self.controller.delete(req, uuid)
def test_delete_server_instance(self):
self._delete_server_instance()
self.assertTrue(self.server_delete_called)
def test_delete_server_instance_not_found(self):
self.assertRaises(webob.exc.HTTPNotFound,
self._delete_server_instance,
uuid=uuids.non_existent_uuid)
def test_delete_server_instance_while_building(self):
req = self._create_delete_request(FAKE_UUID)
self.controller.delete(req, FAKE_UUID)
self.assertTrue(self.server_delete_called)
@mock.patch.object(compute_api.API, 'delete',
side_effect=exception.InstanceIsLocked(
instance_uuid=FAKE_UUID))
def test_delete_locked_server(self, mock_delete):
req = self._create_delete_request(FAKE_UUID)
self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
req, FAKE_UUID)
mock_delete.assert_called_once_with(
req.environ['nova.context'], test.MatchType(objects.Instance))
def test_delete_server_instance_while_resize(self):
req = self._create_delete_request(FAKE_UUID)
fake_get = fakes.fake_compute_get(
vm_state=vm_states.ACTIVE,
task_state=task_states.RESIZE_PREP,
project_id=req.environ['nova.context'].project_id,
user_id=req.environ['nova.context'].user_id)
self.mock_get.side_effect = fake_get
self.controller.delete(req, FAKE_UUID)
def test_delete_server_instance_if_not_launched(self):
self.flags(reclaim_instance_interval=3600)
req = fakes.HTTPRequestV21.blank(self.path_with_id % FAKE_UUID)
req.method = 'DELETE'
self.server_delete_called = False
fake_get = fakes.fake_compute_get(
launched_at=None,
project_id=req.environ['nova.context'].project_id,
user_id=req.environ['nova.context'].user_id)
self.mock_get.side_effect = fake_get
def instance_destroy_mock(*args, **kwargs):
self.server_delete_called = True
deleted_at = timeutils.utcnow()
return fake_instance.fake_db_instance(deleted_at=deleted_at)
self.stub_out('nova.db.api.instance_destroy', instance_destroy_mock)
self.controller.delete(req, FAKE_UUID)
        # delete() should be called for an instance which has never been
        # active, even if reclaim_instance_interval has been set.
self.assertTrue(self.server_delete_called)
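    def test_delete_server_instance_zero_reclaim_sketch(self):
        # Illustrative sketch, assuming a reclaim interval of zero
        # means immediate hard delete: with compute_api.delete stubbed
        # in setUp above, delete() is invoked directly.
        self.flags(reclaim_instance_interval=0)
        self._delete_server_instance()
        self.assertTrue(self.server_delete_called)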
class ServersControllerRebuildInstanceTest(ControllerTest):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
expected_key_name = False
def setUp(self):
super(ServersControllerRebuildInstanceTest, self).setUp()
self.req = fakes.HTTPRequest.blank(self.path_action % FAKE_UUID)
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
self.req_user_id = self.req.environ['nova.context'].user_id
self.req_project_id = self.req.environ['nova.context'].project_id
self.useFixture(nova_fixtures.SingleCellSimple())
def fake_get(ctrl, ctxt, uuid):
if uuid == 'test_inst':
raise webob.exc.HTTPNotFound(explanation='fakeout')
return fakes.stub_instance_obj(None,
vm_state=vm_states.ACTIVE,
project_id=self.req_project_id,
user_id=self.req_user_id)
self.useFixture(
fixtures.MonkeyPatch('nova.api.openstack.compute.servers.'
'ServersController._get_instance',
fake_get))
fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
project_id=self.req_project_id,
user_id=self.req_user_id)
self.mock_get.side_effect = fake_get
self.body = {
'rebuild': {
'name': 'new_name',
'imageRef': self.image_uuid,
'metadata': {
'open': 'stack',
},
},
}
def test_rebuild_server_with_image_not_uuid(self):
self.body['rebuild']['imageRef'] = 'not-uuid'
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID,
body=self.body)
def test_rebuild_server_with_image_as_full_url(self):
image_href = (
'http://localhost/v2/%s/images/'
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' % self.project_id)
self.body['rebuild']['imageRef'] = image_href
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID,
body=self.body)
def test_rebuild_server_with_image_as_empty_string(self):
self.body['rebuild']['imageRef'] = ''
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID,
body=self.body)
def test_rebuild_instance_name_with_spaces_in_the_middle(self):
self.body['rebuild']['name'] = 'abc def'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.controller._action_rebuild(self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_name_with_leading_trailing_spaces(self):
self.body['rebuild']['name'] = ' abc def '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_name_with_leading_trailing_spaces_compat_mode(
self):
self.body['rebuild']['name'] = ' abc def '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.set_legacy_v2()
def fake_rebuild(*args, **kwargs):
self.assertEqual('abc def', kwargs['display_name'])
with mock.patch.object(compute_api.API, 'rebuild') as mock_rebuild:
mock_rebuild.side_effect = fake_rebuild
self.controller._action_rebuild(self.req, FAKE_UUID,
body=self.body)
def test_rebuild_instance_with_blank_metadata_key(self):
self.body['rebuild']['metadata'][''] = 'world'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_key_too_long(self):
self.body['rebuild']['metadata'][('a' * 260)] = 'world'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_value_too_long(self):
self.body['rebuild']['metadata']['key1'] = ('a' * 260)
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
def test_rebuild_instance_with_metadata_value_not_string(self):
self.body['rebuild']['metadata']['key1'] = 1
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
@mock.patch.object(fake._FakeImageService, 'show',
return_value=dict(
id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
min_ram="4096", min_disk="10"))
def test_rebuild_instance_fails_when_min_ram_too_small(self, mock_show):
# make min_ram larger than our instance ram size
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
mock_show.assert_called_once_with(
self.req.environ['nova.context'], self.image_uuid,
include_locations=False, show_deleted=True)
@mock.patch.object(fake._FakeImageService, 'show',
return_value=dict(
id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
min_ram="128", min_disk="100000"))
def test_rebuild_instance_fails_when_min_disk_too_small(self, mock_show):
# make min_disk larger than our instance disk size
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild, self.req,
FAKE_UUID, body=self.body)
mock_show.assert_called_once_with(
self.req.environ['nova.context'], self.image_uuid,
include_locations=False, show_deleted=True)
@mock.patch.object(fake._FakeImageService, 'show',
return_value=dict(
id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='active', size=str(1000 * (1024 ** 3))))
def test_rebuild_instance_image_too_large(self, mock_show):
# make image size larger than our instance disk size
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
mock_show.assert_called_once_with(
self.req.environ['nova.context'], self.image_uuid,
include_locations=False, show_deleted=True)
def test_rebuild_instance_name_all_blank(self):
self.body['rebuild']['name'] = ' '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
@mock.patch.object(fake._FakeImageService, 'show',
return_value=dict(
id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True,
status='DELETED'))
def test_rebuild_instance_with_deleted_image(self, mock_show):
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
mock_show.assert_called_once_with(
self.req.environ['nova.context'], self.image_uuid,
include_locations=False, show_deleted=True)
def test_rebuild_instance_onset_file_limit_over_quota(self):
def fake_get_image(self, context, image_href, **kwargs):
return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
name='public image', is_public=True, status='active')
with test.nested(
mock.patch.object(fake._FakeImageService, 'show',
side_effect=fake_get_image),
mock.patch.object(self.controller.compute_api, 'rebuild',
side_effect=exception.OnsetFileLimitExceeded)
) as (
show_mock, rebuild_mock
):
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_bad_personality(self):
# Personality files have been deprecated as of v2.57
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.56')
body = {
"rebuild": {
"imageRef": self.image_uuid,
"personality": [{
"path": "/path/to/file",
"contents": "INVALID b64",
}]
},
}
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_personality(self):
# Personality files have been deprecated as of v2.57
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.56')
body = {
"rebuild": {
"imageRef": self.image_uuid,
"personality": [{
"path": "/path/to/file",
"contents": base64.encode_as_text("Test String"),
}]
},
}
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
self.assertNotIn('personality', body['server'])
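    def test_personality_contents_encoding_sketch(self):
        # Illustrative sketch: personality file contents must be valid
        # base64 text, which is why "INVALID b64" above fails schema
        # validation; the oslo.serialization helpers used above
        # round-trip cleanly.
        encoded = base64.encode_as_text("Test String")
        self.assertEqual("Test String", base64.decode_as_text(encoded))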
def test_rebuild_response_has_no_show_server_only_attributes(self):
        # There are some old server attributes which were added only for
        # the GET server APIs, not for rebuild. GET server and rebuild
        # share the same view builder method SHOW() to build the response,
        # so make sure the attributes which are not supposed to be
        # included in the rebuild response are not present.
body = {
"rebuild": {
"imageRef": self.image_uuid,
},
}
body = self.controller._action_rebuild(self.req, FAKE_UUID,
body=body).obj
get_only_fields = copy.deepcopy(GET_ONLY_FIELDS)
if self.expected_key_name:
get_only_fields.remove('key_name')
for field in get_only_fields:
self.assertNotIn(field, body['server'])
@mock.patch.object(compute_api.API, 'start')
def test_start(self, mock_start):
req = fakes.HTTPRequestV21.blank(self.path_action % FAKE_UUID)
body = dict(start="")
self.controller._start_server(req, FAKE_UUID, body)
mock_start.assert_called_once_with(mock.ANY, mock.ANY)
@mock.patch.object(compute_api.API, 'start', fake_start_stop_not_ready)
def test_start_not_ready(self):
req = fakes.HTTPRequestV21.blank(self.path_action % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
@mock.patch.object(
compute_api.API, 'start', fakes.fake_actions_to_locked_server)
def test_start_locked_server(self):
req = fakes.HTTPRequestV21.blank(self.path_action % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
@mock.patch.object(compute_api.API, 'start', fake_start_stop_invalid_state)
def test_start_invalid(self):
req = fakes.HTTPRequestV21.blank(self.path_action % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, req, FAKE_UUID, body)
@mock.patch.object(compute_api.API, 'stop')
def test_stop(self, mock_stop):
req = fakes.HTTPRequestV21.blank(self.path_action % FAKE_UUID)
body = dict(stop="")
self.controller._stop_server(req, FAKE_UUID, body)
mock_stop.assert_called_once_with(mock.ANY, mock.ANY)
@mock.patch.object(compute_api.API, 'stop', fake_start_stop_not_ready)
def test_stop_not_ready(self):
req = fakes.HTTPRequestV21.blank(self.path_action % FAKE_UUID)
body = dict(stop="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
@mock.patch.object(
compute_api.API, 'stop', fakes.fake_actions_to_locked_server)
def test_stop_locked_server(self):
req = fakes.HTTPRequestV21.blank(self.path_action % FAKE_UUID)
body = dict(stop="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
@mock.patch.object(compute_api.API, 'stop', fake_start_stop_invalid_state)
def test_stop_invalid_state(self):
req = fakes.HTTPRequestV21.blank(self.path_action % FAKE_UUID)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, req, FAKE_UUID, body)
@mock.patch(
'nova.db.api.instance_get_by_uuid',
fake_instance_get_by_uuid_not_found)
def test_start_with_bogus_id(self):
req = fakes.HTTPRequestV21.blank(self.path_action % 'test_inst')
body = dict(start="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._start_server, req, 'test_inst', body)
@mock.patch(
'nova.db.api.instance_get_by_uuid',
fake_instance_get_by_uuid_not_found)
def test_stop_with_bogus_id(self):
req = fakes.HTTPRequestV21.blank(self.path_action % 'test_inst')
body = dict(stop="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._stop_server, req, 'test_inst', body)
class ServersControllerRebuildTestV254(ServersControllerRebuildInstanceTest):
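    """Tests server rebuild at microversion 2.54 where a keypair name can
    be set or unset as part of the rebuild action.
    """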
expected_key_name = True
def setUp(self):
super(ServersControllerRebuildTestV254, self).setUp()
fakes.stub_out_key_pair_funcs(self)
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.54')
def _test_set_key_name_rebuild(self, set_key_name=True):
key_name = "key"
fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
key_name=key_name,
project_id=self.req_project_id,
user_id=self.req_user_id)
self.mock_get.side_effect = fake_get
if set_key_name:
self.body['rebuild']['key_name'] = key_name
self.req.body = jsonutils.dump_as_bytes(self.body)
server = self.controller._action_rebuild(
self.req, FAKE_UUID,
body=self.body).obj['server']
self.assertEqual(server['id'], FAKE_UUID)
self.assertEqual(server['key_name'], key_name)
def test_rebuild_accepted_with_keypair_name(self):
self._test_set_key_name_rebuild()
def test_rebuild_key_not_changed(self):
self._test_set_key_name_rebuild(set_key_name=False)
def test_rebuild_invalid_microversion_253(self):
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.53')
body = {
"rebuild": {
"imageRef": self.image_uuid,
"key_name": "key"
},
}
excpt = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
self.assertIn('key_name', six.text_type(excpt))
def test_rebuild_with_not_existed_keypair_name(self):
body = {
"rebuild": {
"imageRef": self.image_uuid,
"key_name": "nonexistentkey"
},
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_user_has_no_key_pair(self):
def no_key_pair(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stub_out('nova.db.api.key_pair_get', no_key_pair)
fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
key_name=None,
project_id=self.req_project_id,
user_id=self.req_user_id)
self.mock_get.side_effect = fake_get
self.body['rebuild']['key_name'] = "a-key-name"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_with_non_string_keypair_name(self):
body = {
"rebuild": {
"imageRef": self.image_uuid,
"key_name": 12345
},
}
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_with_invalid_keypair_name(self):
body = {
"rebuild": {
"imageRef": self.image_uuid,
"key_name": "123\0d456"
},
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_with_empty_keypair_name(self):
body = {
"rebuild": {
"imageRef": self.image_uuid,
"key_name": ''
},
}
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
def test_rebuild_with_none_keypair_name(self):
key_name = None
fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
key_name=key_name,
project_id=self.req_project_id,
user_id=self.req_user_id)
self.mock_get.side_effect = fake_get
with mock.patch.object(objects.KeyPair, 'get_by_name') as key_get:
self.body['rebuild']['key_name'] = key_name
self.req.body = jsonutils.dump_as_bytes(self.body)
self.controller._action_rebuild(
self.req, FAKE_UUID,
body=self.body)
            # NOTE: Because the API calls _get_server twice, the server
            # response will always be the same one, so we use
            # objects.KeyPair.get_by_name to verify the test instead.
key_get.assert_not_called()
def test_rebuild_with_too_large_keypair_name(self):
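        # Keypair names are capped at 255 characters by the schema, so a
        # 256-character name should fail validation.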
body = {
"rebuild": {
"imageRef": self.image_uuid,
"key_name": 256 * "k"
},
}
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
class ServersControllerRebuildTestV257(ServersControllerRebuildTestV254):
"""Tests server rebuild at microversion 2.57 where user_data can be
provided and personality files are no longer accepted.
"""
def setUp(self):
super(ServersControllerRebuildTestV257, self).setUp()
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.57')
def test_rebuild_personality(self):
"""Tests that trying to rebuild with personality files fails."""
body = {
"rebuild": {
"imageRef": self.image_uuid,
"personality": [{
"path": "/path/to/file",
"contents": base64.encode_as_text("Test String"),
}]
}
}
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
self.assertIn('personality', six.text_type(ex))
def test_rebuild_user_data_old_version(self):
"""Tests that trying to rebuild with user_data before 2.57 fails."""
body = {
"rebuild": {
"imageRef": self.image_uuid,
"user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
}
}
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.55')
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
self.assertIn('user_data', six.text_type(ex))
def test_rebuild_user_data_malformed(self):
"""Tests that trying to rebuild with malformed user_data fails."""
body = {
"rebuild": {
"imageRef": self.image_uuid,
"user_data": b'invalid'
}
}
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
self.assertIn('user_data', six.text_type(ex))
def test_rebuild_user_data_too_large(self):
"""Tests that passing user_data to rebuild that is too large fails."""
body = {
"rebuild": {
"imageRef": self.image_uuid,
"user_data": ('MQ==' * 16384)
}
}
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
self.assertIn('user_data', six.text_type(ex))
@mock.patch.object(context.RequestContext, 'can')
@mock.patch('nova.db.api.instance_update_and_get_original')
def test_rebuild_reset_user_data(self, mock_update, mock_policy):
"""Tests that passing user_data=None resets the user_data on the
instance.
"""
body = {
"rebuild": {
"imageRef": self.image_uuid,
"user_data": None
}
}
self.mock_get.side_effect = None
self.mock_get.return_value = fakes.stub_instance_obj(
context.RequestContext(self.req_user_id, self.req_project_id),
user_data='ZWNobyAiaGVsbG8gd29ybGQi')
def fake_instance_update_and_get_original(
ctxt, instance_uuid, values, **kwargs):
# save() is called twice and the second one has system_metadata
# in the updates, so we can ignore that one.
if 'system_metadata' not in values:
self.assertIn('user_data', values)
self.assertIsNone(values['user_data'])
return instance_update_and_get_original(
ctxt, instance_uuid, values, **kwargs)
mock_update.side_effect = fake_instance_update_and_get_original
self.controller._action_rebuild(self.req, FAKE_UUID, body=body)
self.assertEqual(2, mock_update.call_count)
class ServersControllerRebuildTestV219(ServersControllerRebuildInstanceTest):
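    """Tests server rebuild at microversion 2.19 where a description can be
    set, cleared or left unchanged as part of the rebuild action.
    """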
def setUp(self):
super(ServersControllerRebuildTestV219, self).setUp()
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.19')
def _rebuild_server(self, set_desc, desc):
fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
display_description=desc,
project_id=self.req_project_id,
user_id=self.req_user_id)
self.mock_get.side_effect = fake_get
if set_desc:
self.body['rebuild']['description'] = desc
self.req.body = jsonutils.dump_as_bytes(self.body)
server = self.controller._action_rebuild(self.req, FAKE_UUID,
body=self.body).obj['server']
self.assertEqual(server['id'], FAKE_UUID)
self.assertEqual(server['description'], desc)
def test_rebuild_server_with_description(self):
self._rebuild_server(True, 'server desc')
def test_rebuild_server_empty_description(self):
self._rebuild_server(True, '')
def test_rebuild_server_without_description(self):
self._rebuild_server(False, '')
def test_rebuild_server_remove_description(self):
self._rebuild_server(True, None)
def test_rebuild_server_description_too_long(self):
self.body['rebuild']['description'] = 'x' * 256
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
def test_rebuild_server_description_invalid(self):
# Invalid non-printable control char in the desc.
self.body['rebuild']['description'] = "123\0d456"
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
# NOTE(jaypipes): Not based from ServersControllerRebuildInstanceTest because
# that test case's setUp is completely b0rked
class ServersControllerRebuildTestV263(ControllerTest):
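    """Tests server rebuild at microversion 2.63 which adds support for
    trusted image certificate IDs.
    """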
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def setUp(self):
super(ServersControllerRebuildTestV263, self).setUp()
self.req = fakes.HTTPRequest.blank(self.path_action % FAKE_UUID)
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
self.req_user_id = self.req.environ['nova.context'].user_id
self.req_project_id = self.req.environ['nova.context'].project_id
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.63')
self.body = {
'rebuild': {
'name': 'new_name',
'imageRef': self.image_uuid,
'metadata': {
'open': 'stack',
},
},
}
@mock.patch('nova.compute.api.API.get')
def _rebuild_server(self, mock_get, certs=None,
conf_enabled=True, conf_certs=None):
fakes.stub_out_trusted_certs(self, certs=certs)
ctx = self.req.environ['nova.context']
mock_get.return_value = fakes.stub_instance_obj(ctx,
vm_state=vm_states.ACTIVE, trusted_certs=certs,
project_id=self.req_project_id, user_id=self.req_user_id)
self.flags(default_trusted_certificate_ids=conf_certs, group='glance')
if conf_enabled:
self.flags(verify_glance_signatures=True, group='glance')
self.flags(enable_certificate_validation=True, group='glance')
self.body['rebuild']['trusted_image_certificates'] = certs
self.req.body = jsonutils.dump_as_bytes(self.body)
server = self.controller._action_rebuild(
self.req, FAKE_UUID, body=self.body).obj['server']
if certs:
self.assertEqual(certs, server['trusted_image_certificates'])
else:
if conf_enabled:
# configuration file default is used
self.assertEqual(
conf_certs, server['trusted_image_certificates'])
else:
# either not set or empty
self.assertIsNone(server['trusted_image_certificates'])
def test_rebuild_server_with_trusted_certs(self):
"""Test rebuild with valid trusted_image_certificates argument"""
self._rebuild_server(
certs=['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8',
'674736e3-f25c-405c-8362-bbf991e0ce0a'])
def test_rebuild_server_without_trusted_certs(self):
"""Test rebuild without trusted image certificates"""
self._rebuild_server()
def test_rebuild_server_conf_options_turned_off_set(self):
"""Test rebuild with feature disabled and certs specified"""
self._rebuild_server(
certs=['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8'], conf_enabled=False)
def test_rebuild_server_conf_options_turned_off_empty(self):
"""Test rebuild with feature disabled"""
self._rebuild_server(conf_enabled=False)
def test_rebuild_server_default_trusted_certificates_empty(self):
"""Test rebuild with feature enabled and no certs specified"""
self._rebuild_server(conf_enabled=True)
def test_rebuild_server_default_trusted_certificates(self):
"""Test rebuild with certificate specified in configurations"""
self._rebuild_server(conf_enabled=True, conf_certs=['conf-id'])
def test_rebuild_server_with_empty_trusted_cert_id(self):
"""Make sure that we can't rebuild with an empty certificate ID"""
self.body['rebuild']['trusted_image_certificates'] = ['']
self.req.body = jsonutils.dump_as_bytes(self.body)
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
self.assertIn('is too short', six.text_type(ex))
def test_rebuild_server_with_empty_trusted_certs(self):
"""Make sure that we can't rebuild with an empty array of IDs"""
self.body['rebuild']['trusted_image_certificates'] = []
self.req.body = jsonutils.dump_as_bytes(self.body)
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
self.assertIn('is too short', six.text_type(ex))
def test_rebuild_server_with_too_many_trusted_certs(self):
"""Make sure that we can't rebuild with an array of >50 unique IDs"""
self.body['rebuild']['trusted_image_certificates'] = [
'cert{}'.format(i) for i in range(51)]
self.req.body = jsonutils.dump_as_bytes(self.body)
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
self.assertIn('is too long', six.text_type(ex))
def test_rebuild_server_with_nonunique_trusted_certs(self):
"""Make sure that we can't rebuild with a non-unique array of IDs"""
self.body['rebuild']['trusted_image_certificates'] = ['cert', 'cert']
self.req.body = jsonutils.dump_as_bytes(self.body)
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
self.assertIn('has non-unique elements', six.text_type(ex))
def test_rebuild_server_with_invalid_trusted_cert_id(self):
"""Make sure that we can't rebuild with non-string certificate IDs"""
self.body['rebuild']['trusted_image_certificates'] = [1, 2]
self.req.body = jsonutils.dump_as_bytes(self.body)
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
self.assertIn('is not of type', six.text_type(ex))
def test_rebuild_server_with_invalid_trusted_certs(self):
"""Make sure that we can't rebuild with certificates in a non-array"""
self.body['rebuild']['trusted_image_certificates'] = "not-an-array"
self.req.body = jsonutils.dump_as_bytes(self.body)
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
self.assertIn('is not of type', six.text_type(ex))
def test_rebuild_server_with_trusted_certs_pre_2_63_fails(self):
"""Make sure we can't use trusted_certs before 2.63"""
self._rebuild_server(certs=['trusted-cert-id'])
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.62')
ex = self.assertRaises(exception.ValidationError,
self.controller._action_rebuild,
self.req, FAKE_UUID, body=self.body)
self.assertIn('Additional properties are not allowed',
six.text_type(ex))
def test_rebuild_server_with_trusted_certs_policy_failed(self):
rule_name = "os_compute_api:servers:rebuild:trusted_certs"
rules = {"os_compute_api:servers:rebuild": "@",
rule_name: "project:%s" % fakes.FAKE_PROJECT_ID}
self.policy.set_rules(rules)
exc = self.assertRaises(exception.PolicyNotAuthorized,
self._rebuild_server,
certs=['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8'])
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch.object(compute_api.API, 'rebuild')
def test_rebuild_server_with_cert_validation_error(
self, mock_rebuild):
mock_rebuild.side_effect = exception.CertificateValidationFailed(
cert_uuid="cert id", reason="test cert validation error")
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self._rebuild_server,
certs=['trusted-cert-id'])
self.assertIn('test cert validation error',
six.text_type(ex))
class ServersControllerRebuildTestV271(ControllerTest):
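    """Tests server rebuild at microversion 2.71 which adds the
    server_groups field to the rebuild response.
    """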
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def setUp(self):
super(ServersControllerRebuildTestV271, self).setUp()
self.req = fakes.HTTPRequest.blank(self.path_action % FAKE_UUID,
use_admin_context=True)
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
self.req_user_id = self.req.environ['nova.context'].user_id
self.req_project_id = self.req.environ['nova.context'].project_id
self.req.api_version_request = (api_version_request.
APIVersionRequest('2.71'))
self.body = {
"rebuild": {
"imageRef": self.image_uuid,
"user_data": None
}
}
@mock.patch('nova.compute.api.API.get')
def _rebuild_server(self, mock_get):
ctx = self.req.environ['nova.context']
mock_get.return_value = fakes.stub_instance_obj(ctx,
vm_state=vm_states.ACTIVE, project_id=self.req_project_id,
user_id=self.req_user_id)
server = self.controller._action_rebuild(
self.req, FAKE_UUID, body=self.body).obj['server']
return server
@mock.patch.object(InstanceGroup, 'get_by_instance_uuid',
side_effect=exception.InstanceGroupNotFound(group_uuid=FAKE_UUID))
def test_rebuild_with_server_group_not_exist(self, mock_sg_get):
server = self._rebuild_server()
self.assertEqual([], server['server_groups'])
class ServersControllerUpdateTest(ControllerTest):
def _get_request(self, body=None):
req = fakes.HTTPRequestV21.blank(self.path_with_id % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = jsonutils.dump_as_bytes(body)
fake_get = fakes.fake_compute_get(
project_id=req.environ['nova.context'].project_id,
user_id=req.environ['nova.context'].user_id)
self.mock_get.side_effect = fake_get
return req
def test_update_server_all_attributes(self):
body = {'server': {
'name': 'server_test',
}}
req = self._get_request(body)
res_dict = self.controller.update(req, FAKE_UUID, body=body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_server_name(self):
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
res_dict = self.controller.update(req, FAKE_UUID, body=body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_response_has_no_show_server_only_attributes(self):
        # Some old server attributes were added only for the GET server
        # APIs, not for PUT. GET server and PUT server share the same
        # view builder method show() to build the response, so make sure
        # the attributes that are not supposed to be included in the
        # PUT response are absent.
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
res_dict = self.controller.update(req, FAKE_UUID, body=body)
for field in GET_ONLY_FIELDS:
self.assertNotIn(field, res_dict['server'])
def test_update_server_name_too_long(self):
body = {'server': {'name': 'x' * 256}}
req = self._get_request(body)
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_name_all_blank_spaces(self):
self.stub_out('nova.db.api.instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank(self.path_with_id % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'server': {'name': ' ' * 64}}
req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_name_with_spaces_in_the_middle(self):
body = {'server': {'name': 'abc def'}}
req = self._get_request(body)
self.controller.update(req, FAKE_UUID, body=body)
def test_update_server_name_with_leading_trailing_spaces(self):
self.stub_out('nova.db.api.instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank(self.path_with_id % FAKE_UUID)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'server': {'name': ' abc def '}}
req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.ValidationError,
self.controller.update, req, FAKE_UUID, body=body)
def test_update_server_name_with_leading_trailing_spaces_compat_mode(self):
body = {'server': {'name': ' abc def '}}
req = self._get_request(body)
req.set_legacy_v2()
self.controller.update(req, FAKE_UUID, body=body)
def test_update_server_admin_password_extra_arg(self):
inst_dict = dict(name='server_test', admin_password='bacon')
body = dict(server=inst_dict)
req = fakes.HTTPRequest.blank(self.path_with_id % FAKE_UUID)
req.method = 'PUT'
req.content_type = "application/json"
req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_host_id(self):
inst_dict = dict(host_id='123')
body = dict(server=inst_dict)
req = fakes.HTTPRequest.blank(self.path_with_id % FAKE_UUID)
req.method = 'PUT'
req.content_type = "application/json"
req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_not_found(self):
self.mock_get.side_effect = exception.InstanceNotFound(
instance_id='fake')
body = {'server': {'name': 'server_test'}}
req = fakes.HTTPRequest.blank(self.path_with_id % FAKE_UUID)
req.method = 'PUT'
req.content_type = "application/json"
req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, FAKE_UUID, body=body)
@mock.patch.object(compute_api.API, 'update_instance')
def test_update_server_not_found_on_update(self, mock_update_instance):
def fake_update(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
mock_update_instance.side_effect = fake_update
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_policy_fail(self):
rule = {'compute:update': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.update, req, FAKE_UUID, body=body)
class ServersControllerTriggerCrashDumpTest(ControllerTest):
def setUp(self):
super(ServersControllerTriggerCrashDumpTest, self).setUp()
self.instance = fakes.stub_instance_obj(None,
vm_state=vm_states.ACTIVE,
project_id=self.project_id)
def fake_get(ctrl, ctxt, uuid):
if uuid != FAKE_UUID:
raise webob.exc.HTTPNotFound(explanation='fakeout')
return self.instance
self.useFixture(
fixtures.MonkeyPatch('nova.api.openstack.compute.servers.'
'ServersController._get_instance',
fake_get))
self.req = fakes.HTTPRequest.blank(self.path_action % FAKE_UUID)
        self.req.api_version_request = \
api_version_request.APIVersionRequest('2.17')
self.body = dict(trigger_crash_dump=None)
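        # The trigger_crash_dump action schema only accepts null as the
        # value; the schema tests below rely on that.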
@mock.patch.object(compute_api.API, 'trigger_crash_dump')
def test_trigger_crash_dump(self, mock_trigger_crash_dump):
ctxt = self.req.environ['nova.context']
self.controller._action_trigger_crash_dump(self.req, FAKE_UUID,
body=self.body)
mock_trigger_crash_dump.assert_called_with(ctxt, self.instance)
def test_trigger_crash_dump_policy_failed(self):
rule_name = "os_compute_api:servers:trigger_crash_dump"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_trigger_crash_dump,
self.req, FAKE_UUID, body=self.body)
self.assertIn("os_compute_api:servers:trigger_crash_dump",
exc.format_message())
@mock.patch.object(compute_api.API, 'trigger_crash_dump',
fake_start_stop_not_ready)
def test_trigger_crash_dump_not_ready(self):
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_trigger_crash_dump,
self.req, FAKE_UUID, body=self.body)
@mock.patch.object(compute_api.API, 'trigger_crash_dump',
fakes.fake_actions_to_locked_server)
def test_trigger_crash_dump_locked_server(self):
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_trigger_crash_dump,
self.req, FAKE_UUID, body=self.body)
@mock.patch.object(compute_api.API, 'trigger_crash_dump',
fake_start_stop_invalid_state)
def test_trigger_crash_dump_invalid_state(self):
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_trigger_crash_dump,
self.req, FAKE_UUID, body=self.body)
def test_trigger_crash_dump_with_bogus_id(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_trigger_crash_dump,
self.req, 'test_inst', body=self.body)
def test_trigger_crash_dump_schema_invalid_type(self):
self.body['trigger_crash_dump'] = 'not null'
self.assertRaises(exception.ValidationError,
self.controller._action_trigger_crash_dump,
self.req, FAKE_UUID, body=self.body)
def test_trigger_crash_dump_schema_extra_property(self):
self.body['extra_property'] = 'extra'
self.assertRaises(exception.ValidationError,
self.controller._action_trigger_crash_dump,
self.req, FAKE_UUID, body=self.body)
class ServersControllerUpdateTestV219(ServersControllerUpdateTest):
def _get_request(self, body=None):
req = super(ServersControllerUpdateTestV219, self)._get_request(
body=body)
        req.api_version_request = api_version_request.APIVersionRequest(
            '2.19')
return req
def _update_server_desc(self, set_desc, desc=None):
body = {'server': {}}
if set_desc:
body['server']['description'] = desc
req = self._get_request()
res_dict = self.controller.update(req, FAKE_UUID, body=body)
return res_dict
def test_update_server_description(self):
res_dict = self._update_server_desc(True, 'server_desc')
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['description'], 'server_desc')
def test_update_server_empty_description(self):
res_dict = self._update_server_desc(True, '')
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['description'], '')
def test_update_server_without_description(self):
res_dict = self._update_server_desc(False)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertIsNone(res_dict['server']['description'])
def test_update_server_remove_description(self):
res_dict = self._update_server_desc(True)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertIsNone(res_dict['server']['description'])
def test_update_server_all_attributes(self):
body = {'server': {
'name': 'server_test',
'description': 'server_desc'
}}
req = self._get_request(body)
res_dict = self.controller.update(req, FAKE_UUID, body=body)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server_test')
self.assertEqual(res_dict['server']['description'], 'server_desc')
def test_update_server_description_too_long(self):
body = {'server': {'description': 'x' * 256}}
req = self._get_request(body)
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
def test_update_server_description_invalid(self):
# Invalid non-printable control char in the desc.
body = {'server': {'description': "123\0d456"}}
req = self._get_request(body)
self.assertRaises(exception.ValidationError, self.controller.update,
req, FAKE_UUID, body=body)
class ServersControllerUpdateTestV271(ServersControllerUpdateTest):
body = {'server': {'name': 'server_test'}}
def _get_request(self, body=None):
req = super(ServersControllerUpdateTestV271, self)._get_request(
body=body)
        req.api_version_request = api_version_request.APIVersionRequest(
            '2.71')
return req
@mock.patch.object(InstanceGroup, 'get_by_instance_uuid',
side_effect=exception.InstanceGroupNotFound(group_uuid=FAKE_UUID))
def test_update_with_server_group_not_exist(self, mock_sg_get):
req = self._get_request(self.body)
res_dict = self.controller.update(req, FAKE_UUID, body=self.body)
self.assertEqual([], res_dict['server']['server_groups'])
class ServerStatusTest(test.TestCase):
project_id = fakes.FAKE_PROJECT_ID
path = '/%s/servers' % project_id
path_with_id = path + '/%s'
path_action = path + '/%s/action'
def setUp(self):
super(ServerStatusTest, self).setUp()
fakes.stub_out_nw_api(self)
fakes.stub_out_secgroup_api(
self, security_groups=[{'name': 'default'}])
self.controller = servers.ServersController()
def _get_with_state(self, vm_state, task_state=None):
self.stub_out('nova.compute.api.API.get',
fakes.fake_compute_get(vm_state=vm_state,
task_state=task_state))
request = fakes.HTTPRequestV21.blank(self.path_with_id % FAKE_UUID)
return self.controller.show(request, FAKE_UUID)
def test_active(self):
response = self._get_with_state(vm_states.ACTIVE)
self.assertEqual(response['server']['status'], 'ACTIVE')
def test_reboot(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBOOTING)
self.assertEqual(response['server']['status'], 'REBOOT')
def test_reboot_hard(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBOOTING_HARD)
self.assertEqual(response['server']['status'], 'HARD_REBOOT')
def test_reboot_resize_policy_fail(self):
rule = {'compute:reboot': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
req = fakes.HTTPRequestV21.blank(self.path_action % '1234')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_reboot, req, '1234',
body={'reboot': {'type': 'HARD'}})
def test_rebuild(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.REBUILDING)
self.assertEqual(response['server']['status'], 'REBUILD')
def test_rebuild_error(self):
response = self._get_with_state(vm_states.ERROR)
self.assertEqual(response['server']['status'], 'ERROR')
def test_resize(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.RESIZE_PREP)
self.assertEqual(response['server']['status'], 'RESIZE')
def test_confirm_resize_policy_fail(self):
rule = {'compute:confirm_resize': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
req = fakes.HTTPRequestV21.blank(self.path_action % '1234')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_confirm_resize, req, '1234', {})
def test_verify_resize(self):
response = self._get_with_state(vm_states.RESIZED, None)
self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
def test_revert_resize(self):
response = self._get_with_state(vm_states.RESIZED,
task_states.RESIZE_REVERTING)
self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
def test_revert_resize_policy_fail(self):
rule = {'compute:revert_resize': 'role:admin'}
policy.set_rules(oslo_policy.Rules.from_dict(rule))
req = fakes.HTTPRequestV21.blank(self.path_action % '1234')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_revert_resize, req, '1234', {})
def test_password_update(self):
response = self._get_with_state(vm_states.ACTIVE,
task_states.UPDATING_PASSWORD)
self.assertEqual(response['server']['status'], 'PASSWORD')
def test_stopped(self):
response = self._get_with_state(vm_states.STOPPED)
self.assertEqual(response['server']['status'], 'SHUTOFF')
class ServersControllerCreateTest(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
project_id = fakes.FAKE_PROJECT_ID
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(enable_instance_password=True, group='api')
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
fakes.stub_out_nw_api(self)
self.controller = servers.ServersController()
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/%s/images/%s' % (self.project_id,
image_uuid)
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'display_description': inst['display_description'] or '',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': fakes.FAKE_PROJECT_ID,
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update_and_get_original(
context, instance_uuid, params, columns_to_join=None):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
fakes.stub_out_key_pair_funcs(self)
fake.stub_out_image_service(self)
self.stub_out('nova.db.api.project_get_networks',
lambda c, u: dict(id='1', host='localhost'))
self.stub_out('nova.db.api.instance_create', instance_create)
self.stub_out('nova.db.api.instance_system_metadata_update',
lambda *a, **kw: None)
self.stub_out('nova.db.api.instance_get', instance_get)
self.stub_out('nova.db.api.instance_update', instance_update)
self.stub_out('nova.db.api.instance_update_and_get_original',
server_update_and_get_original)
self.body = {
'server': {
'name': 'server_test',
'imageRef': self.image_uuid,
'flavorRef': self.flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
'networks': [{
'uuid': 'ff608d40-75e9-48cb-b745-77bb55b5eaf2'
}],
},
}
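        # A v2-style block device mapping; the legacy format below uses
        # virtual_name/volume_id instead of source/destination types.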
self.bdm_v2 = [{
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'uuid': 'fake',
'device_name': 'vdb',
'delete_on_termination': False,
}]
self.bdm = [{
'no_device': None,
'virtual_name': 'root',
'volume_id': fakes.FAKE_UUID,
'device_name': 'vda',
'delete_on_termination': False
}]
self.req = fakes.HTTPRequest.blank('/%s/servers' % self.project_id)
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
server = dict(name='server_test', imageRef=FAKE_UUID, flavorRef=2)
body = {'server': server}
self.req.body = encodeutils.safe_encode(jsonutils.dumps(body))
def _check_admin_password_len(self, server_dict):
"""utility function - check server_dict for admin_password length."""
self.assertEqual(CONF.password_length,
len(server_dict["adminPass"]))
def _check_admin_password_missing(self, server_dict):
"""utility function - check server_dict for admin_password absence."""
self.assertNotIn("adminPass", server_dict)
def _test_create_instance(self, flavor=2):
self.stub_out('uuid.uuid4', lambda: FAKE_UUID)
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
self.body['server']['imageRef'] = image_uuid
self.body['server']['flavorRef'] = flavor
self.req.body = jsonutils.dump_as_bytes(self.body)
server = self.controller.create(self.req, body=self.body).obj['server']
self._check_admin_password_len(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_with_none_value_port(self):
self.body['server'] = {'networks': [{'port': None, 'uuid': FAKE_UUID}]}
self.body['server']['name'] = 'test'
self._test_create_instance()
def test_create_instance_private_flavor(self):
values = {
'name': 'fake_name',
'memory': 512,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': '1324',
'swap': 0,
'rxtx_factor': 0.5,
'is_public': False,
}
flavors.create(**values)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_instance,
flavor=1324)
self.assertEqual('Flavor 1324 could not be found.', six.text_type(ex))
def test_create_server_bad_image_uuid(self):
self.body['server']['min_count'] = 1
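        # Note the trailing comma: imageRef becomes the tuple (1,), which
        # fails the schema's image ref validation.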
self.body['server']['imageRef'] = 1,
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
def test_create_server_with_deleted_image(self):
# Get the fake image service so we can set the status to deleted
(image_service, image_id) = glance.get_remote_image_service(
context, '')
image_service.update(context, self.image_uuid, {'status': 'DELETED'})
self.addCleanup(image_service.update, context, self.image_uuid,
{'status': 'active'})
self.body['server']['flavorRef'] = 2
self.req.body = jsonutils.dump_as_bytes(self.body)
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
self.controller.create(self.req, body=self.body)
def test_create_server_image_too_large(self):
# Get the fake image service so we can update the size of the image
(image_service, image_id) = glance.get_remote_image_service(
context, self.image_uuid)
image = image_service.show(context, image_id)
orig_size = image['size']
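        # Bump the image size to 1000 GiB, far larger than the flavor's disk.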
new_size = str(1000 * (1024 ** 3))
image_service.update(context, self.image_uuid, {'size': new_size})
self.addCleanup(image_service.update, context, self.image_uuid,
{'size': orig_size})
self.body['server']['flavorRef'] = 2
self.req.body = jsonutils.dump_as_bytes(self.body)
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
"Flavor's disk is too small for requested image."):
self.controller.create(self.req, body=self.body)
@mock.patch.object(fake._FakeImageService, 'show',
return_value=dict(
id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
status='active',
properties=dict(
cinder_encryption_key_id=fakes.FAKE_UUID)))
def test_create_server_image_nonbootable(self, mock_show):
self.req.body = jsonutils.dump_as_bytes(self.body)
expected_msg = ("Image {} is unacceptable: Direct booting of an image "
"uploaded from an encrypted volume is unsupported.")
with testtools.ExpectedException(
webob.exc.HTTPBadRequest,
expected_msg.format(self.image_uuid)):
self.controller.create(self.req, body=self.body)
def test_create_instance_with_image_non_uuid(self):
self.body['server']['imageRef'] = 'not-uuid'
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
def test_create_instance_with_image_as_full_url(self):
image_href = ('http://localhost/v2/%s/images/'
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' % self.project_id)
self.body['server']['imageRef'] = image_href
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
def test_create_instance_with_image_as_empty_string(self):
self.body['server']['imageRef'] = ''
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
def test_create_instance_no_key_pair(self):
fakes.stub_out_key_pair_funcs(self, have_key_pair=False)
self._test_create_instance()
def _test_create_extra(self, params, no_image=False):
self.body['server']['flavorRef'] = 2
if no_image:
self.body['server'].pop('imageRef', None)
self.body['server'].update(params)
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.headers["content-type"] = "application/json"
self.controller.create(self.req, body=self.body).obj['server']
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.PortRequiresFixedIP(
port_id=uuids.port))
def test_create_instance_with_port_with_no_fixed_ips(self, mock_create):
requested_networks = [{'port': uuids.port}]
params = {'networks': requested_networks}
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
def test_create_instance_raise_user_data_too_large(self):
self.body['server']['user_data'] = (b'1' * 65536)
ex = self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
# Make sure the failure was about user_data and not something else.
self.assertIn('user_data', six.text_type(ex))
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.NetworkRequiresSubnet(
network_uuid=uuids.network))
def test_create_instance_with_network_with_no_subnet(self, mock_create):
requested_networks = [{'uuid': uuids.network}]
params = {'networks': requested_networks}
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.NoUniqueMatch(
"No Unique match found for ..."))
def test_create_instance_with_non_unique_secgroup_name(self, mock_create):
requested_networks = [{'uuid': uuids.network}]
params = {'networks': requested_networks,
'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, params)
def test_create_instance_secgroup_leading_trailing_spaces(self):
network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network}]
params = {'networks': requested_networks,
'security_groups': [{'name': ' sg '}]}
self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_secgroup_leading_trailing_spaces_compat_mode(
self, mock_create):
requested_networks = [{'uuid': uuids.network}]
params = {'networks': requested_networks,
'security_groups': [{'name': ' sg '}]}
def fake_create(*args, **kwargs):
self.assertEqual([' sg '], kwargs['security_groups'])
return (objects.InstanceList(objects=[fakes.stub_instance_obj(
self.req.environ['nova.context'])]), None)
mock_create.side_effect = fake_create
self.req.set_legacy_v2()
self._test_create_extra(params)
def test_create_instance_with_networks_disabled_neutronv2(self):
net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
requested_networks = [{'uuid': net_uuid}]
params = {'networks': requested_networks}
old_create = compute_api.API.create
def create(*args, **kwargs):
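            # Each requested-network tuple is
            # (network_id, fixed_ip, port_id, pci_request_id).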
result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
None, None)]
self.assertEqual(result, kwargs['requested_networks'].as_tuples())
return old_create(*args, **kwargs)
with mock.patch('nova.compute.api.API.create', create):
self._test_create_extra(params)
def test_create_instance_with_pass_disabled(self):
        # Test with admin passwords disabled; see LP bug 921814.
        self.flags(enable_instance_password=False, group='api')
        self.stub_out('uuid.uuid4', lambda: FAKE_UUID)
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_missing(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_name_too_long(self):
self.body['server']['name'] = 'X' * 256
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError, self.controller.create,
self.req, body=self.body)
def test_create_instance_name_with_spaces_in_the_middle(self):
self.body['server']['name'] = 'abc def'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.controller.create(self.req, body=self.body)
def test_create_instance_name_with_leading_trailing_spaces(self):
self.body['server']['name'] = ' abc def '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_name_with_leading_trailing_spaces_in_compat_mode(
self):
self.body['server']['name'] = ' abc def '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.set_legacy_v2()
self.controller.create(self.req, body=self.body)
def test_create_instance_name_all_blank_spaces(self):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/%s/flavors/3' % self.project_id
body = {
'server': {
'name': ' ' * 64,
'imageRef': image_uuid,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
req = fakes.HTTPRequest.blank('/%s/servers' % self.project_id)
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_az_with_leading_trailing_spaces(self):
self.body['server']['availability_zone'] = ' zone1 '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_az_with_leading_trailing_spaces_in_compat_mode(
self):
self.body['server']['name'] = ' abc def '
        self.body['server']['availability_zone'] = ' zone1 '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.set_legacy_v2()
with mock.patch.object(availability_zones, 'get_availability_zones',
return_value=[' zone1 ']):
self.controller.create(self.req, body=self.body)
def test_create_instance(self):
self.stub_out('uuid.uuid4', lambda: FAKE_UUID)
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_len(server)
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_pass_disabled(self):
self.stub_out('uuid.uuid4', lambda: FAKE_UUID)
self.flags(enable_instance_password=False, group='api')
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self._check_admin_password_missing(server)
self.assertEqual(FAKE_UUID, server['id'])
@mock.patch('nova.virt.hardware.numa_get_constraints')
def _test_create_instance_numa_topology_wrong(self, exc,
numa_constraints_mock):
numa_constraints_mock.side_effect = exc(**{
'name': None,
'source': 'flavor',
'requested': 'dummy',
'available': str(objects.fields.CPUAllocationPolicy.ALL),
'cpunum': 0,
'cpumax': 0,
'cpuset': None,
'memsize': 0,
'memtotal': 0})
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_numa_topology_wrong(self):
for exc in [exception.ImageNUMATopologyIncomplete,
exception.ImageNUMATopologyForbidden,
exception.ImageNUMATopologyAsymmetric,
exception.ImageNUMATopologyCPUOutOfRange,
exception.ImageNUMATopologyCPUDuplicates,
exception.ImageNUMATopologyCPUsUnassigned,
exception.InvalidCPUAllocationPolicy,
exception.InvalidCPUThreadAllocationPolicy,
exception.ImageNUMATopologyMemoryOutOfRange]:
self._test_create_instance_numa_topology_wrong(exc)
def test_create_instance_too_much_metadata(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata']['vote'] = 'fiddletown'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_too_long(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = {('a' * 260): '12345'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_value_too_long(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = {'key1': ('a' * 260)}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_blank(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = {'': 'abcd'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_not_dict(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = 'string'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_key_not_string(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = {1: 'test'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_metadata_value_not_string(self):
self.flags(metadata_items=1, group='quota')
self.body['server']['metadata'] = {'test': ['a', 'list']}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_user_data_malformed_bad_request(self):
params = {'user_data': 'u1234'}
self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
def _create_instance_body_of_config_drive(self, param):
def create(*args, **kwargs):
self.assertIn('config_drive', kwargs)
return old_create(*args, **kwargs)
old_create = compute_api.API.create
self.stub_out('nova.compute.api.API.create', create)
self.body['server']['config_drive'] = param
self.req.body = jsonutils.dump_as_bytes(self.body)
def test_create_instance_with_config_drive(self):
param = True
self._create_instance_body_of_config_drive(param)
self.controller.create(self.req, body=self.body).obj
def test_create_instance_with_config_drive_as_boolean_string(self):
param = 'false'
self._create_instance_body_of_config_drive(param)
self.controller.create(self.req, body=self.body).obj
def test_create_instance_with_bad_config_drive(self):
param = 12345
self._create_instance_body_of_config_drive(param)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_without_config_drive(self):
def create(*args, **kwargs):
self.assertIsNone(kwargs['config_drive'])
return old_create(*args, **kwargs)
old_create = compute_api.API.create
self.stub_out('nova.compute.api.API.create', create)
self.req.body = jsonutils.dump_as_bytes(self.body)
self.controller.create(self.req, body=self.body).obj
def test_create_instance_with_empty_config_drive(self):
param = ''
self._create_instance_body_of_config_drive(param)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def _test_create(self, params, no_image=False):
        self.body['server'].update(params)
if no_image:
del self.body['server']['imageRef']
self.req.body = jsonutils.dump_as_bytes(self.body)
self.controller.create(self.req, body=self.body).obj['server']
def test_create_instance_with_volumes_enabled_no_image(self):
"""Test that the create will fail if there is no image
        and no bdms are supplied in the request.
"""
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn('imageRef', kwargs)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create, {}, no_image=True)
@mock.patch('nova.compute.api.API._get_volumes_for_bdms')
@mock.patch.object(compute_api.API, '_validate_bdm')
@mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
def test_create_instance_with_bdms_and_no_image(
self, mock_bdm_image_metadata, mock_validate_bdm, mock_get_vols):
mock_bdm_image_metadata.return_value = {}
mock_validate_bdm.return_value = True
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertThat(
block_device.BlockDeviceDict(self.bdm_v2[0]),
matchers.DictMatches(kwargs['block_device_mapping'][0])
)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {'block_device_mapping_v2': self.bdm_v2}
self._test_create(params, no_image=True)
mock_validate_bdm.assert_called_once()
mock_bdm_image_metadata.assert_called_once_with(
mock.ANY, mock.ANY, False)
@mock.patch('nova.compute.api.API._get_volumes_for_bdms')
@mock.patch.object(compute_api.API, '_validate_bdm')
@mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
def test_create_instance_with_bdms_and_empty_imageRef(
self, mock_bdm_image_metadata, mock_validate_bdm, mock_get_volumes):
mock_bdm_image_metadata.return_value = {}
mock_validate_bdm.return_value = True
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertThat(
block_device.BlockDeviceDict(self.bdm_v2[0]),
matchers.DictMatches(kwargs['block_device_mapping'][0])
)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {'block_device_mapping_v2': self.bdm_v2,
'imageRef': ''}
self._test_create(params)
def test_create_instance_with_imageRef_as_full_url(self):
bdm = [{'device_name': 'foo'}]
image_href = ('http://localhost/v2/%s/images/'
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' % self.project_id)
params = {'block_device_mapping_v2': bdm,
'imageRef': image_href}
self.assertRaises(exception.ValidationError,
self._test_create, params)
def test_create_instance_with_non_uuid_imageRef(self):
bdm = [{'device_name': 'foo'}]
params = {'block_device_mapping_v2': bdm,
'imageRef': '123123abcd'}
self.assertRaises(exception.ValidationError,
self._test_create, params)
def test_create_instance_with_invalid_bdm_in_2nd_dict(self):
bdm_1st = {"source_type": "image", "delete_on_termination": True,
"boot_index": 0,
"uuid": "2ff3a1d3-ed70-4c3f-94ac-941461153bc0",
"destination_type": "local"}
bdm_2nd = {"source_type": "volume",
"uuid": "99d92140-3d0c-4ea5-a49c-f94c38c607f0",
"destination_type": "invalid"}
bdm = [bdm_1st, bdm_2nd]
params = {'block_device_mapping_v2': bdm,
'imageRef': '2ff3a1d3-ed70-4c3f-94ac-941461153bc0'}
self.assertRaises(exception.ValidationError,
self._test_create, params)
def test_create_instance_with_boot_index_none_ok(self):
"""Tests creating a server with two block devices. One is the boot
device and the other is a non-bootable device.
"""
# From the docs:
# To disable a device from booting, set the boot index to a negative
        # value or use the default boot index value, which is None. The
        # simplest usage is to set the boot index of the boot device to 0
        # and use the default boot index value, None, for any other devices.
bdms = [
# This is the bootable device that would create a 20GB cinder
# volume from the given image.
{
'source_type': 'image',
'destination_type': 'volume',
'boot_index': 0,
'uuid': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'volume_size': 20
},
# This is the non-bootable 10GB ext4 ephemeral block device.
{
'source_type': 'blank',
'destination_type': 'local',
'boot_index': None,
# If 'guest_format' is 'swap' then a swap device is created.
'guest_format': 'ext4'
}
]
params = {'block_device_mapping_v2': bdms}
self._test_create(params, no_image=True)
def test_create_instance_with_boot_index_none_image_local_fails(self):
"""Tests creating a server with a local image-based block device which
has a boot_index of None which is invalid.
"""
bdms = [{
'source_type': 'image',
'destination_type': 'local',
'boot_index': None,
'uuid': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
}]
params = {'block_device_mapping_v2': bdms}
self.assertRaises(webob.exc.HTTPBadRequest, self._test_create,
params, no_image=True)
def test_create_instance_with_invalid_boot_index(self):
bdm = [{"source_type": "image", "delete_on_termination": True,
"boot_index": 'invalid',
"uuid": "2ff3a1d3-ed70-4c3f-94ac-941461153bc0",
"destination_type": "local"}]
params = {'block_device_mapping_v2': bdm,
'imageRef': '2ff3a1d3-ed70-4c3f-94ac-941461153bc0'}
self.assertRaises(exception.ValidationError,
self._test_create, params)
def test_create_instance_with_device_name_not_string(self):
self.bdm_v2[0]['device_name'] = 123
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm_v2)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {'block_device_mapping_v2': self.bdm_v2}
self.assertRaises(exception.ValidationError,
self._test_create, params, no_image=True)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_bdm_param_not_list(self, mock_create):
self.params = {'block_device_mapping': '/dev/vdb'}
self.assertRaises(exception.ValidationError,
self._test_create, self.params)
def test_create_instance_with_device_name_empty(self):
self.bdm_v2[0]['device_name'] = ''
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm_v2)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {'block_device_mapping_v2': self.bdm_v2}
self.assertRaises(exception.ValidationError,
self._test_create, params, no_image=True)
def test_create_instance_with_device_name_too_long(self):
self.bdm_v2[0]['device_name'] = 'a' * 256
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm_v2)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {'block_device_mapping_v2': self.bdm_v2}
self.assertRaises(exception.ValidationError,
self._test_create, params, no_image=True)
def test_create_instance_with_space_in_device_name(self):
self.bdm_v2[0]['device_name'] = 'v da'
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertTrue(kwargs['legacy_bdm'])
self.assertEqual(kwargs['block_device_mapping'], self.bdm_v2)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {'block_device_mapping_v2': self.bdm_v2}
self.assertRaises(exception.ValidationError,
self._test_create, params, no_image=True)
def test_create_instance_with_invalid_size(self):
self.bdm_v2[0]['volume_size'] = 'hello world'
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm_v2)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {'block_device_mapping_v2': self.bdm_v2}
self.assertRaises(exception.ValidationError,
self._test_create, params, no_image=True)
def _test_create_instance_with_destination_type_error(self,
destination_type):
self.bdm_v2[0]['destination_type'] = destination_type
params = {'block_device_mapping_v2': self.bdm_v2}
self.assertRaises(exception.ValidationError,
self._test_create, params, no_image=True)
def test_create_instance_with_destination_type_empty_string(self):
self._test_create_instance_with_destination_type_error('')
def test_create_instance_with_invalid_destination_type(self):
self._test_create_instance_with_destination_type_error('fake')
@mock.patch('nova.compute.api.API._get_volumes_for_bdms')
@mock.patch.object(compute_api.API, '_validate_bdm')
def test_create_instance_bdm(self, mock_validate_bdm, mock_get_volumes):
bdm = [{
'source_type': 'volume',
'device_name': 'fake_dev',
'uuid': 'fake_vol'
}]
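        # The v2 'uuid' key should be translated to the legacy
        # 'volume_id' key by the time the compute API sees the BDM.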
bdm_expected = [{
'source_type': 'volume',
'device_name': 'fake_dev',
'volume_id': 'fake_vol'
}]
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertFalse(kwargs['legacy_bdm'])
for expected, received in zip(bdm_expected,
kwargs['block_device_mapping']):
self.assertThat(block_device.BlockDeviceDict(expected),
matchers.DictMatches(received))
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {'block_device_mapping_v2': bdm}
self._test_create(params, no_image=True)
mock_validate_bdm.assert_called_once()
@mock.patch('nova.compute.api.API._get_volumes_for_bdms')
@mock.patch.object(compute_api.API, '_validate_bdm')
def test_create_instance_bdm_missing_device_name(self, mock_validate_bdm,
mock_get_volumes):
del self.bdm_v2[0]['device_name']
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertFalse(kwargs['legacy_bdm'])
self.assertNotIn(None,
kwargs['block_device_mapping'][0]['device_name'])
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
params = {'block_device_mapping_v2': self.bdm_v2}
self._test_create(params, no_image=True)
mock_validate_bdm.assert_called_once()
@mock.patch.object(
block_device.BlockDeviceDict, '_validate',
side_effect=exception.InvalidBDMFormat(details='Wrong BDM'))
def test_create_instance_bdm_validation_error(self, mock_validate):
params = {'block_device_mapping_v2': self.bdm_v2}
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create, params, no_image=True)
@mock.patch('nova.compute.api.API._get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
params = {'block_device_mapping_v2': self.bdm_v2}
fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
self.assertRaises(webob.exc.HTTPBadRequest, self._test_create, params,
no_image=True)
@mock.patch('nova.compute.api.API._get_volumes_for_bdms')
def test_create_instance_bdm_api_validation_fails(self, mock_get_volumes):
self.validation_fail_test_validate_called = False
self.validation_fail_instance_destroy_called = False
bdm_exceptions = ((exception.InvalidBDMSnapshot, {'id': 'fake'}),
(exception.InvalidBDMVolume, {'id': 'fake'}),
(exception.InvalidBDMImage, {'id': 'fake'}),
(exception.InvalidBDMBootSequence, {}),
(exception.InvalidBDMLocalsLimit, {}))
ex_iter = iter(bdm_exceptions)
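        # Each stubbed _validate_bdm call pulls the next exception off
        # the iterator, so every loop iteration below exercises a
        # different BDM validation failure.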
def _validate_bdm(*args, **kwargs):
self.validation_fail_test_validate_called = True
ex, kargs = next(ex_iter)
raise ex(**kargs)
def _instance_destroy(*args, **kwargs):
self.validation_fail_instance_destroy_called = True
self.stub_out('nova.compute.api.API._validate_bdm', _validate_bdm)
self.stub_out('nova.objects.Instance.destroy', _instance_destroy)
for _unused in range(len(bdm_exceptions)):
params = {'block_device_mapping_v2':
[self.bdm_v2[0].copy()]}
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create, params)
self.assertTrue(self.validation_fail_test_validate_called)
self.assertFalse(self.validation_fail_instance_destroy_called)
self.validation_fail_test_validate_called = False
self.validation_fail_instance_destroy_called = False
@mock.patch('nova.compute.api.API._get_volumes_for_bdms')
@mock.patch.object(compute_api.API, '_validate_bdm')
def _test_create_bdm(self, params, mock_validate_bdm, mock_get_volumes,
no_image=False):
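        # Shared helper: POST the (possibly image-less) server body and
        # verify the controller hands the BDMs to _validate_bdm exactly
        # once.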
self.body['server'].update(params)
if no_image:
del self.body['server']['imageRef']
self.req.body = jsonutils.dump_as_bytes(self.body)
self.controller.create(self.req, body=self.body).obj['server']
mock_validate_bdm.assert_called_once_with(
test.MatchType(fakes.FakeRequestContext),
test.MatchType(objects.Instance),
test.MatchType(objects.Flavor),
test.MatchType(objects.BlockDeviceMappingList),
{},
mock_get_volumes.return_value,
False)
def test_create_instance_with_volumes_enabled(self):
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self._test_create_bdm(params)
@mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
def test_create_instance_with_volumes_enabled_and_bdms_no_image(
self, mock_get_bdm_image_metadata):
"""Test that the create works if there is no image supplied but
os-volumes extension is enabled and bdms are supplied
"""
volume = {
'id': uuids.volume_id,
'status': 'active',
'volume_image_metadata':
{'test_key': 'test_value'}
}
mock_get_bdm_image_metadata.return_value = volume
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
self.assertNotIn('imageRef', kwargs)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self._test_create_bdm(params, no_image=True)
mock_get_bdm_image_metadata.assert_called_once_with(
mock.ANY, self.bdm, True)
@mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
def test_create_instance_with_imageRef_as_empty_string(
self, mock_bdm_image_metadata):
volume = {
'id': uuids.volume_id,
'status': 'active',
'volume_image_metadata':
{'test_key': 'test_value'}
}
mock_bdm_image_metadata.return_value = volume
params = {'block_device_mapping': self.bdm,
'imageRef': ''}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self._test_create_bdm(params)
def test_create_instance_with_imageRef_as_full_url_legacy_bdm(self):
bdm = [{
'volume_id': fakes.FAKE_UUID,
'device_name': 'vda'
}]
image_href = ('http://localhost/v2/%s/images/'
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' % self.project_id)
params = {'block_device_mapping': bdm,
'imageRef': image_href}
self.assertRaises(exception.ValidationError,
self._test_create_bdm, params)
def test_create_instance_with_non_uuid_imageRef_legacy_bdm(self):
bdm = [{
'volume_id': fakes.FAKE_UUID,
'device_name': 'vda'
}]
params = {'block_device_mapping': bdm,
'imageRef': 'bad-format'}
self.assertRaises(exception.ValidationError,
self._test_create_bdm, params)
@mock.patch('nova.compute.api.API._get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails_legacy_bdm(
self, fake_bdm_meta):
bdm = [{
'volume_id': fakes.FAKE_UUID,
'device_name': 'vda'
}]
params = {'block_device_mapping': bdm}
fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_bdm, params, no_image=True)
def test_create_instance_with_device_name_not_string_legacy_bdm(self):
self.bdm[0]['device_name'] = 123
old_create = compute_api.API.create
self.params = {'block_device_mapping': self.bdm}
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self.assertRaises(exception.ValidationError,
self._test_create_bdm, self.params)
def test_create_instance_with_snapshot_volume_id_none(self):
old_create = compute_api.API.create
bdm = [{
'no_device': None,
'snapshot_id': None,
'volume_id': None,
'device_name': 'vda',
'delete_on_termination': False
}]
self.params = {'block_device_mapping': bdm}
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self.assertRaises(exception.ValidationError,
self._test_create_bdm, self.params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_legacy_bdm_param_not_list(self, mock_create):
self.params = {'block_device_mapping': '/dev/vdb'}
self.assertRaises(exception.ValidationError,
self._test_create_bdm, self.params)
def test_create_instance_with_device_name_empty_legacy_bdm(self):
self.bdm[0]['device_name'] = ''
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self.assertRaises(exception.ValidationError,
self._test_create_bdm, params)
def test_create_instance_with_device_name_too_long_legacy_bdm(self):
        self.bdm[0]['device_name'] = 'a' * 256
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self.assertRaises(exception.ValidationError,
self._test_create_bdm, params)
def test_create_instance_with_space_in_device_name_legacy_bdm(self):
        self.bdm[0]['device_name'] = 'vd a'
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertTrue(kwargs['legacy_bdm'])
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self.assertRaises(exception.ValidationError,
self._test_create_bdm, params)
def _test_create_bdm_instance_with_size_error(self, size):
bdm = [{'delete_on_termination': True,
'device_name': 'vda',
'volume_size': size,
'volume_id': '11111111-1111-1111-1111-111111111111'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], bdm)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self.assertRaises(exception.ValidationError,
self._test_create_bdm, params)
def test_create_instance_with_invalid_size_legacy_bdm(self):
self._test_create_bdm_instance_with_size_error("hello world")
def test_create_instance_with_size_empty_string(self):
self._test_create_bdm_instance_with_size_error('')
def test_create_instance_with_size_zero(self):
self._test_create_bdm_instance_with_size_error("0")
def test_create_instance_with_size_greater_than_limit(self):
self._test_create_bdm_instance_with_size_error(db.MAX_INT + 1)
def test_create_instance_with_bdm_delete_on_termination(self):
bdm = [{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'True'},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo3', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'False'},
{'device_name': 'foo4', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo5', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False}]
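        # The API layer should coerce the string values 'True'/'False'
        # into real booleans, as reflected in expected_bdm.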
expected_bdm = [
{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo3', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo4', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo5', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(expected_bdm, kwargs['block_device_mapping'])
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self._test_create_bdm(params)
def test_create_instance_with_bdm_delete_on_termination_invalid_2nd(self):
bdm = [{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'True'},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'invalid'}]
params = {'block_device_mapping': bdm}
self.assertRaises(exception.ValidationError,
self._test_create_bdm, params)
def test_create_instance_decide_format_legacy(self):
bdm = [{'device_name': 'foo1',
'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True}]
expected_legacy_flag = True
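        # Neither a request without BDMs nor one using the legacy
        # 'block_device_mapping' key should flip legacy_bdm off its
        # default of True.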
old_create = compute_api.API.create
def create(*args, **kwargs):
legacy_bdm = kwargs.get('legacy_bdm', True)
self.assertEqual(legacy_bdm, expected_legacy_flag)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self._test_create_bdm({})
params = {'block_device_mapping': bdm}
self._test_create_bdm(params)
def test_create_instance_both_bdm_formats(self):
bdm = [{'device_name': 'foo'}]
bdm_v2 = [{'source_type': 'volume',
'uuid': 'fake_vol'}]
params = {'block_device_mapping': bdm,
'block_device_mapping_v2': bdm_v2}
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_bdm, params)
def test_create_instance_invalid_key_name(self):
self.body['server']['key_name'] = 'nonexistentkey'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_valid_key_name(self):
self.stub_out('uuid.uuid4', lambda: FAKE_UUID)
self.body['server']['key_name'] = 'key'
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_len(res["server"])
def test_create_server_keypair_name_with_leading_trailing(self):
self.body['server']['key_name'] = ' abc '
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create')
def test_create_server_keypair_name_with_leading_trailing_compat_mode(
self, mock_create):
params = {'key_name': ' abc '}
def fake_create(*args, **kwargs):
self.assertEqual(' abc ', kwargs['key_name'])
return (objects.InstanceList(objects=[fakes.stub_instance_obj(
self.req.environ['nova.context'])]), None)
mock_create.side_effect = fake_create
self.req.set_legacy_v2()
self._test_create_extra(params)
def test_create_instance_invalid_flavor_href(self):
flavor_ref = 'http://localhost/v2/flavors/asdf'
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_invalid_flavor_id_int(self):
flavor_ref = -1
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
@mock.patch.object(nova.compute.flavors, 'get_flavor_by_flavor_id',
return_value=objects.Flavor())
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_non_existing_snapshot_id(
self, mock_create,
mock_get_flavor_by_flavor_id):
mock_create.side_effect = exception.SnapshotNotFound(snapshot_id='123')
self.body['server'] = {'name': 'server_test',
'flavorRef': self.flavor_ref,
'block_device_mapping_v2':
[{'source_type': 'snapshot',
'uuid': '123'}]}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_invalid_flavor_id_empty(self):
flavor_ref = ""
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_instance_bad_flavor_href(self):
flavor_ref = 'http://localhost/v2/flavors/17'
self.body['server']['flavorRef'] = flavor_ref
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_local_href(self):
self.stub_out('uuid.uuid4', lambda: FAKE_UUID)
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self.assertEqual(FAKE_UUID, server['id'])
def test_create_instance_admin_password(self):
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = 'testpass'
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
server = res['server']
self.assertEqual(server['adminPass'],
self.body['server']['adminPass'])
def test_create_instance_admin_password_pass_disabled(self):
self.flags(enable_instance_password=False, group='api')
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = 'testpass'
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, body=self.body).obj
self.assertIn('server', res)
self.assertIn('adminPass', self.body['server'])
def test_create_instance_admin_password_empty(self):
self.body['server']['flavorRef'] = 3
self.body['server']['adminPass'] = ''
self.req.body = jsonutils.dump_as_bytes(self.body)
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, body=self.body)
def test_create_location(self):
self.stub_out('uuid.uuid4', lambda: FAKE_UUID)
selfhref = 'http://localhost/v2/%s/servers/%s' % (self.project_id,
FAKE_UUID)
self.req.body = jsonutils.dump_as_bytes(self.body)
robj = self.controller.create(self.req, body=self.body)
self.assertEqual(encodeutils.safe_decode(robj['Location']), selfhref)
@mock.patch('nova.objects.Quotas.get_all_by_project')
@mock.patch('nova.objects.Quotas.get_all_by_project_and_user')
@mock.patch('nova.objects.Quotas.count_as_dict')
def _do_test_create_instance_above_quota(self, resource, allowed,
quota, expected_msg, mock_count, mock_get_all_pu,
mock_get_all_p):
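        # Build the {'project': ..., 'user': ...} structure returned by
        # Quotas.count_as_dict, with the resource under test already
        # consumed up to (quota - allowed).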
count = {'project': {}, 'user': {}}
for res in ('instances', 'ram', 'cores'):
if res == resource:
value = quota - allowed
count['project'][res] = count['user'][res] = value
else:
count['project'][res] = count['user'][res] = 0
mock_count.return_value = count
mock_get_all_p.return_value = {'project_id': fakes.FAKE_PROJECT_ID}
mock_get_all_pu.return_value = {'project_id': fakes.FAKE_PROJECT_ID,
'user_id': 'fake_user'}
if resource in db_api.PER_PROJECT_QUOTAS:
mock_get_all_p.return_value[resource] = quota
else:
mock_get_all_pu.return_value[resource] = quota
fakes.stub_out_instance_quota(self, allowed, quota, resource)
self.body['server']['flavorRef'] = 3
self.req.body = jsonutils.dump_as_bytes(self.body)
try:
self.controller.create(self.req, body=self.body).obj['server']
self.fail('expected quota to be exceeded')
except webob.exc.HTTPForbidden as e:
self.assertEqual(e.explanation, expected_msg)
def test_create_instance_above_quota_instances(self):
msg = ('Quota exceeded for instances: Requested 1, but'
' already used 10 of 10 instances')
self._do_test_create_instance_above_quota('instances', 0, 10, msg)
def test_create_instance_above_quota_ram(self):
msg = ('Quota exceeded for ram: Requested 4096, but'
' already used 8192 of 10240 ram')
self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
def test_create_instance_above_quota_cores(self):
msg = ('Quota exceeded for cores: Requested 2, but'
' already used 9 of 10 cores')
self._do_test_create_instance_above_quota('cores', 1, 10, msg)
@mock.patch.object(fakes.QUOTAS, 'limit_check')
def test_create_instance_above_quota_server_group_members(
self, mock_limit_check):
ctxt = self.req.environ['nova.context']
fake_group = objects.InstanceGroup(ctxt)
fake_group.project_id = ctxt.project_id
fake_group.user_id = ctxt.user_id
fake_group.create()
real_count = fakes.QUOTAS.count_as_dict
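        # Intercept only the server_group_members count; every other
        # resource falls through to the real count_as_dict.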
def fake_count(context, name, group, user_id):
if name == 'server_group_members':
self.assertEqual(group.uuid, fake_group.uuid)
self.assertEqual(user_id,
self.req.environ['nova.context'].user_id)
return {'user': {'server_group_members': 10}}
else:
return real_count(context, name, group, user_id)
def fake_limit_check(context, **kwargs):
if 'server_group_members' in kwargs:
raise exception.OverQuota(overs={})
def fake_instance_destroy(context, uuid, constraint):
return fakes.stub_instance(1)
mock_limit_check.side_effect = fake_limit_check
self.stub_out('nova.db.api.instance_destroy', fake_instance_destroy)
self.body['os:scheduler_hints'] = {'group': fake_group.uuid}
self.req.body = jsonutils.dump_as_bytes(self.body)
expected_msg = "Quota exceeded, too many servers in group"
try:
with mock.patch.object(fakes.QUOTAS, 'count_as_dict',
side_effect=fake_count):
self.controller.create(self.req, body=self.body).obj
self.fail('expected quota to be exceeded')
except webob.exc.HTTPForbidden as e:
self.assertEqual(e.explanation, expected_msg)
def test_create_instance_with_group_hint(self):
ctxt = self.req.environ['nova.context']
test_group = objects.InstanceGroup(ctxt)
test_group.project_id = ctxt.project_id
test_group.user_id = ctxt.user_id
test_group.create()
def fake_instance_destroy(context, uuid, constraint):
return fakes.stub_instance(1)
self.stub_out('nova.db.api.instance_destroy', fake_instance_destroy)
self.body['os:scheduler_hints'] = {'group': test_group.uuid}
self.req.body = jsonutils.dump_as_bytes(self.body)
server = self.controller.create(self.req, body=self.body).obj['server']
test_group = objects.InstanceGroup.get_by_uuid(ctxt, test_group.uuid)
self.assertIn(server['id'], test_group.members)
def _test_create_instance_with_group_hint(self, hint,
hint_name='os:scheduler_hints'):
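        # hint_name lets callers exercise both the current
        # 'os:scheduler_hints' key and the legacy
        # 'OS-SCH-HNT:scheduler_hints' spelling.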
def fake_instance_destroy(context, uuid, constraint):
return fakes.stub_instance(1)
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], hint)
return ([fakes.stub_instance(1)], '')
self.stub_out('nova.compute.api.API.create', fake_create)
        self.stub_out('nova.db.api.instance_destroy', fake_instance_destroy)
self.body[hint_name] = hint
self.req.body = jsonutils.dump_as_bytes(self.body)
return self.controller.create(self.req, body=self.body).obj['server']
def test_create_instance_with_group_hint_legacy(self):
self._test_create_instance_with_group_hint(
{'different_host': '9c47bf55-e9d8-42da-94ab-7f9e80cd1857'},
hint_name='OS-SCH-HNT:scheduler_hints')
def test_create_server_with_different_host_hint(self):
self._test_create_instance_with_group_hint(
{'different_host': '9c47bf55-e9d8-42da-94ab-7f9e80cd1857'})
self._test_create_instance_with_group_hint(
{'different_host': ['9c47bf55-e9d8-42da-94ab-7f9e80cd1857',
'82412fa6-0365-43a9-95e4-d8b20e00c0de']})
def test_create_instance_with_group_hint_group_not_found(self):
def fake_instance_destroy(context, uuid, constraint):
return fakes.stub_instance(1)
self.stub_out('nova.db.api.instance_destroy', fake_instance_destroy)
self.body['os:scheduler_hints'] = {
'group': '5b674f73-c8cf-40ef-9965-3b6fe4b304b1'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_with_group_hint_wrong_uuid_format(self):
self.body['os:scheduler_hints'] = {
'group': 'non-uuid'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_server_bad_hints_non_dict(self):
sch_hints = ['os:scheduler_hints', 'OS-SCH-HNT:scheduler_hints']
for hint in sch_hints:
self.body[hint] = 'non-dict'
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_server_bad_hints_long_group(self):
self.body['os:scheduler_hints'] = {
'group': 'a' * 256}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
def test_create_server_with_bad_different_host_hint(self):
self.body['os:scheduler_hints'] = {
'different_host': 'non-server-id'}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
self.body['os:scheduler_hints'] = {
'different_host': ['non-server-id01', 'non-server-id02']}
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.PortInUse(port_id=uuids.port))
def test_create_instance_with_port_in_use(self, mock_create):
requested_networks = [{'uuid': uuids.network, 'port': uuids.port}]
params = {'networks': requested_networks}
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_public_network_non_admin(self, mock_create):
public_network_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
params = {'networks': [{'uuid': public_network_uuid}]}
self.req.body = jsonutils.dump_as_bytes(self.body)
mock_create.side_effect = exception.ExternalNetworkAttachForbidden(
network_uuid=public_network_uuid)
self.assertRaises(webob.exc.HTTPForbidden,
self._test_create_extra, params)
def test_multiple_create_with_string_type_min_and_max(self):
min_count = '2'
max_count = '3'
params = {
'min_count': min_count,
'max_count': max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsInstance(kwargs['min_count'], int)
self.assertIsInstance(kwargs['max_count'], int)
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self._test_create_extra(params)
def test_create_instance_with_multiple_create_enabled(self):
min_count = 2
max_count = 3
params = {
'min_count': min_count,
'max_count': max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self._test_create_extra(params)
def test_create_instance_invalid_negative_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'min_count': -1,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_negative_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'max_count': -1,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req,
body=body)
def test_create_instance_with_blank_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'min_count': '',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req,
body=body)
def test_create_instance_with_blank_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'max_count': '',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_min_greater_than_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'min_count': 4,
'max_count': 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_alpha_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'min_count': 'abcd',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_alpha_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'max_count': 'abcd',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req,
body=body)
def test_create_multiple_instances(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'min_count': 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
def create_db_entry_for_new_instance(*args, **kwargs):
instance = args[4]
self.instance_cache_by_uuid[instance.uuid] = instance
return instance
self.stub_out('nova.compute.api.API.create_db_entry_for_new_instance',
create_db_entry_for_new_instance)
res = self.controller.create(self.req, body=body).obj
instance_uuids = self.instance_cache_by_uuid.keys()
self.assertIn(res["server"]["id"], instance_uuids)
self._check_admin_password_len(res["server"])
def test_create_multiple_instances_pass_disabled(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
self.flags(enable_instance_password=False, group='api')
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'min_count': 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
def create_db_entry_for_new_instance(*args, **kwargs):
instance = args[4]
self.instance_cache_by_uuid[instance.uuid] = instance
return instance
self.stub_out('nova.compute.api.API.create_db_entry_for_new_instance',
create_db_entry_for_new_instance)
res = self.controller.create(self.req, body=body).obj
instance_uuids = self.instance_cache_by_uuid.keys()
self.assertIn(res["server"]["id"], instance_uuids)
self._check_admin_password_missing(res["server"])
def _create_multiple_instances_resv_id_return(self, resv_id_return):
"""Test creating multiple instances with asking for
reservation_id
"""
def create_db_entry_for_new_instance(*args, **kwargs):
instance = args[4]
self.instance_cache_by_uuid[instance.uuid] = instance
return instance
self.stub_out('nova.compute.api.API.create_db_entry_for_new_instance',
create_db_entry_for_new_instance)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'min_count': 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
'return_reservation_id': resv_id_return
}
}
res = self.controller.create(self.req, body=body)
reservation_id = res.obj['reservation_id']
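        # Reservation ids are opaque strings (typically 'r-' plus a
        # random suffix), so only sanity-check the shape here.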
self.assertNotEqual(reservation_id, "")
self.assertIsNotNone(reservation_id)
self.assertGreater(len(reservation_id), 1)
def test_create_multiple_instances_with_resv_id_return(self):
self._create_multiple_instances_resv_id_return(True)
def test_create_multiple_instances_with_string_resv_id_return(self):
self._create_multiple_instances_resv_id_return("True")
def test_create_multiple_instances_with_multiple_volume_bdm(self):
"""Test that a BadRequest is raised if multiple instances
are requested with a list of block device mappings for volumes.
"""
min_count = 2
bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'},
{'source_type': 'volume', 'uuid': 'vol-yyyy'}
]
params = {
'block_device_mapping_v2': bdm,
'min_count': min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(len(kwargs['block_device_mapping']), 2)
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
self.assertEqual("Cannot attach one or more volumes to multiple "
"instances", exc.explanation)
def test_create_multiple_instances_with_single_volume_bdm(self):
"""Test that a BadRequest is raised if multiple instances
are requested to boot from a single volume.
"""
min_count = 2
bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'}]
params = {
'block_device_mapping_v2': bdm,
'min_count': min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['block_device_mapping'][0]['volume_id'],
'vol-xxxx')
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
self.assertEqual("Cannot attach one or more volumes to multiple "
"instances", exc.explanation)
def test_create_multiple_instance_with_non_integer_max_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'max_count': 2.5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=body)
def test_create_multiple_instance_with_non_integer_min_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'min_count': 2.5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body=body)
def test_create_multiple_instance_max_count_overquota_min_count_ok(self):
self.flags(instances=3, group='quota')
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'min_count': 2,
'max_count': 5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
def create_db_entry_for_new_instance(*args, **kwargs):
instance = args[4]
self.instance_cache_by_uuid[instance.uuid] = instance
return instance
self.stub_out('nova.compute.api.API.create_db_entry_for_new_instance',
create_db_entry_for_new_instance)
res = self.controller.create(self.req, body=body).obj
instance_uuids = self.instance_cache_by_uuid.keys()
self.assertIn(res["server"]["id"], instance_uuids)
def test_create_multiple_instance_max_count_overquota_min_count_over(self):
self.flags(instances=3, group='quota')
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'min_count': 4,
'max_count': 5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
self.req, body=body)
@mock.patch.object(compute_api.API, 'create')
def test_create_multiple_instance_with_specified_ip_neutronv2(self,
_api_mock):
_api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest(
reason="")
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
address = '10.0.0.1'
requested_networks = [{'uuid': network, 'fixed_ip': address,
'port': port}]
params = {'networks': requested_networks}
self.body['server']['max_count'] = 2
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.MultiplePortsNotApplicable(
reason="Unable to launch multiple instances with "
"a single configured port ID. Please "
"launch your instance one by one with "
"different ports."))
def test_create_multiple_instance_with_port(self, mock_create):
requested_networks = [{'uuid': uuids.network, 'port': uuids.port}]
params = {'networks': requested_networks}
self.body['server']['max_count'] = 2
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.NetworkNotFound(
network_id=uuids.network))
def test_create_instance_with_not_found_network(self, mock_create):
requested_networks = [{'uuid': uuids.network}]
params = {'networks': requested_networks}
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.PortNotFound(port_id=uuids.port))
def test_create_instance_with_port_not_found(self, mock_create):
requested_networks = [{'uuid': uuids.network, 'port': uuids.port}]
params = {'networks': requested_networks}
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_network_ambiguous(self, mock_create):
mock_create.side_effect = exception.NetworkAmbiguous()
self.assertRaises(webob.exc.HTTPConflict,
self._test_create_extra, {})
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.UnableToAutoAllocateNetwork(
project_id=FAKE_UUID))
def test_create_instance_with_unable_to_auto_allocate_network(self,
mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, {})
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.ImageNotAuthorized(
image_id=FAKE_UUID))
def test_create_instance_with_image_not_authorized(self,
mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, {})
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InstanceExists(
name='instance-name'))
def test_create_instance_raise_instance_exists(self, mock_create):
self.assertRaises(webob.exc.HTTPConflict,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDMEphemeralSize)
def test_create_instance_raise_invalid_bdm_ephsize(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidNUMANodesNumber(
nodes='-1'))
def test_create_instance_raise_invalid_numa_nodes(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDMFormat(details=''))
def test_create_instance_raise_invalid_bdm_format(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDMSwapSize)
def test_create_instance_raise_invalid_bdm_swapsize(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidBDM)
def test_create_instance_raise_invalid_bdm(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.ImageBadRequest(
image_id='dummy', response='dummy'))
def test_create_instance_raise_image_bad_request(self, mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
def test_create_instance_invalid_availability_zone(self):
self.body['server']['availability_zone'] = 'invalid::::zone'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
def test_create_instance_invalid_availability_zone_as_int(self):
self.body['server']['availability_zone'] = 123
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.FixedIpNotFoundForAddress(
address='dummy'))
def test_create_instance_raise_fixed_ip_not_found_bad_request(self,
mock_create):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.CPUThreadPolicyConfigurationInvalid())
def test_create_instance_raise_cpu_thread_policy_configuration_invalid(
self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.get_mem_encryption_constraint',
side_effect=exception.FlavorImageConflict(
message="fake conflict reason"))
def test_create_instance_raise_flavor_image_conflict(
self, mock_conflict):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.get_mem_encryption_constraint',
side_effect=exception.InvalidMachineType(
message="fake conflict reason"))
def test_create_instance_raise_invalid_machine_type(
self, mock_conflict):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.ImageCPUPinningForbidden())
def test_create_instance_raise_image_cpu_pinning_forbidden(
self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.ImageCPUThreadPolicyForbidden())
def test_create_instance_raise_image_cpu_thread_policy_forbidden(
self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.MemoryPageSizeInvalid(pagesize='-1'))
def test_create_instance_raise_memory_page_size_invalid(self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.MemoryPageSizeForbidden(pagesize='1',
against='2'))
def test_create_instance_raise_memory_page_size_forbidden(self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.RealtimeConfigurationInvalid())
def test_create_instance_raise_realtime_configuration_invalid(
self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch('nova.virt.hardware.numa_get_constraints',
side_effect=exception.RealtimeMaskNotFoundOrInvalid())
def test_create_instance_raise_realtime_mask_not_found_or_invalid(
self, mock_numa):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_invalid_personality(self, mock_create):
# Personality files have been deprecated as of v2.57
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.56')
codec = 'utf8'
content = encodeutils.safe_encode(
'b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==')
start_position = 19
end_position = 20
msg = 'invalid start byte'
mock_create.side_effect = UnicodeDecodeError(codec, content,
start_position,
end_position, msg)
self.body['server']['personality'] = [
{
"path": "/etc/banner.txt",
"contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==",
},
]
self.req.body = jsonutils.dump_as_bytes(self.body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=self.body)
def test_create_instance_without_personality_should_get_empty_list(self):
# Personality files have been deprecated as of v2.57
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.56')
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual([], kwargs['injected_files'])
return old_create(*args, **kwargs)
self.stub_out('nova.compute.api.API.create', create)
self._test_create_instance()
def test_create_instance_with_extra_personality_arg(self):
# Personality files have been deprecated as of v2.57
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.56')
self.body['server']['personality'] = [
{
"path": "/etc/banner.txt",
"contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==",
"extra_arg": "extra value"
},
]
self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.PciRequestAliasNotDefined(
alias='fake_name'))
def test_create_instance_pci_alias_not_defined(self, mock_create):
# Tests that PciRequestAliasNotDefined is translated to a 400 error.
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, {})
self.assertIn('PCI alias fake_name is not defined', six.text_type(ex))
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.PciInvalidAlias(
reason='just because'))
def test_create_instance_pci_invalid_alias(self, mock_create):
# Tests that PciInvalidAlias is translated to a 400 error.
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, {})
self.assertIn('Invalid PCI alias definition', six.text_type(ex))
def test_create_instance_with_user_data(self):
value = base64.encode_as_text("A random string")
params = {'user_data': value}
self._test_create_extra(params)
def test_create_instance_with_bad_user_data(self):
value = "A random string"
params = {'user_data': value}
self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
@mock.patch('nova.compute.api.API.create')
    def test_create_instance_with_none_allowed_for_v20_compat_mode(
            self, mock_create):
def create(context, *args, **kwargs):
self.assertIsNone(kwargs['user_data'])
return ([fakes.stub_instance_obj(context)], None)
mock_create.side_effect = create
self.req.set_legacy_v2()
params = {'user_data': None}
self._test_create_extra(params)
class ServersControllerCreateTestV219(ServersControllerCreateTest):
def _create_instance_req(self, set_desc, desc=None):
if set_desc:
self.body['server']['description'] = desc
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.19')
def test_create_instance_with_description(self):
self._create_instance_req(True, 'server_desc')
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, body=self.body).obj
def test_create_instance_with_none_description(self):
self._create_instance_req(True)
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, body=self.body).obj
def test_create_instance_with_empty_description(self):
self._create_instance_req(True, '')
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, body=self.body).obj
def test_create_instance_without_description(self):
self._create_instance_req(False)
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, body=self.body).obj
def test_create_instance_description_too_long(self):
self._create_instance_req(True, 'X' * 256)
self.assertRaises(exception.ValidationError, self.controller.create,
self.req, body=self.body)
def test_create_instance_description_invalid(self):
self._create_instance_req(True, "abc\0ddef")
self.assertRaises(exception.ValidationError, self.controller.create,
self.req, body=self.body)
class ServersControllerCreateTestV232(test.NoDBTestCase):
def setUp(self):
super(ServersControllerCreateTestV232, self).setUp()
self.controller = servers.ServersController()
self.body = {
'server': {
'name': 'device-tagging-server',
'imageRef': '6b0edabb-8cde-4684-a3f4-978960a51378',
'flavorRef': '2',
'networks': [{
'uuid': 'ff608d40-75e9-48cb-b745-77bb55b5eaf2'
}],
'block_device_mapping_v2': [{
'uuid': '70a599e0-31e7-49b7-b260-868f441e862b',
'source_type': 'image',
'destination_type': 'volume',
'boot_index': 0,
'volume_size': '1'
}]
}
}
self.req = fakes.HTTPRequestV21.blank(
'/%s/servers' % fakes.FAKE_PROJECT_ID, version='2.32')
self.req.method = 'POST'
self.req.headers['content-type'] = 'application/json'
def _create_server(self):
self.req.body = jsonutils.dump_as_bytes(self.body)
self.controller.create(self.req, body=self.body)
def test_create_server_no_tags(self):
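        # compute_api.API.create returns (instance_list, reservation_id);
        # the mocks below fake that tuple so no real scheduling happens.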
with test.nested(
mock.patch.object(nova.compute.flavors, 'get_flavor_by_flavor_id',
return_value=objects.Flavor()),
mock.patch.object(
compute_api.API, 'create',
return_value=(
[{'uuid': 'f60012d9-5ba4-4547-ab48-f94ff7e62d4e'}],
1)),
):
self._create_server()
def test_create_server_tagged_nic(self):
with test.nested(
mock.patch.object(nova.compute.flavors, 'get_flavor_by_flavor_id',
return_value=objects.Flavor()),
mock.patch.object(
compute_api.API, 'create',
return_value=(
[{'uuid': 'f60012d9-5ba4-4547-ab48-f94ff7e62d4e'}],
1)),
):
self.body['server']['networks'][0]['tag'] = 'foo'
self._create_server()
def test_create_server_tagged_bdm(self):
with test.nested(
mock.patch.object(nova.compute.flavors, 'get_flavor_by_flavor_id',
return_value=objects.Flavor()),
mock.patch.object(
compute_api.API, 'create',
return_value=(
[{'uuid': 'f60012d9-5ba4-4547-ab48-f94ff7e62d4e'}],
1)),
):
self.body['server']['block_device_mapping_v2'][0]['tag'] = 'foo'
self._create_server()
class ServersControllerCreateTestV237(test.NoDBTestCase):
"""Tests server create scenarios with the v2.37 microversion.
    These tests focus on validating the 2.37 server create request,
    with emphasis on negative scenarios.
"""
def setUp(self):
super(ServersControllerCreateTestV237, self).setUp()
# Create the server controller.
self.controller = servers.ServersController()
# Define a basic server create request body which tests can customize.
self.body = {
'server': {
'name': 'auto-allocate-test',
'imageRef': '6b0edabb-8cde-4684-a3f4-978960a51378',
'flavorRef': '2',
},
}
# Create a fake request using the 2.37 microversion.
self.req = fakes.HTTPRequestV21.blank(
'/%s/servers' % fakes.FAKE_PROJECT_ID, version='2.37')
self.req.method = 'POST'
self.req.headers['content-type'] = 'application/json'
def _create_server(self, networks):
self.body['server']['networks'] = networks
self.req.body = jsonutils.dump_as_bytes(self.body)
return self.controller.create(self.req, body=self.body).obj['server']
    def test_create_server_auto_pre_2_37_fails(self):
"""Negative test to make sure you can't pass 'auto' before 2.37"""
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.36')
self.assertRaises(exception.ValidationError, self._create_server,
'auto')
def test_create_server_no_requested_networks_fails(self):
"""Negative test for a server create request with no networks requested
which should fail with the v2.37 schema validation.
"""
self.assertRaises(exception.ValidationError, self._create_server, None)
def test_create_server_network_id_not_uuid_fails(self):
"""Negative test for a server create request where the requested
network id is not one of the auto/none enums.
"""
self.assertRaises(exception.ValidationError, self._create_server,
'not-auto-or-none')
def test_create_server_network_id_empty_string_fails(self):
"""Negative test for a server create request where the requested
network id is the empty string.
"""
self.assertRaises(exception.ValidationError, self._create_server, '')
@mock.patch.object(context.RequestContext, 'can')
def test_create_server_networks_none_skip_policy(self, context_can):
"""Test to ensure skip checking policy rule create:attach_network,
when networks is 'none' which means no network will be allocated.
"""
with test.nested(
mock.patch('nova.objects.service.get_minimum_version_all_cells',
return_value=14),
mock.patch.object(nova.compute.flavors, 'get_flavor_by_flavor_id',
return_value=objects.Flavor()),
mock.patch.object(
compute_api.API, 'create',
return_value=(
[{'uuid': 'f9bccadf-5ab1-4a56-9156-c00c178fe5f5'}],
1)),
):
network_policy = server_policies.SERVERS % 'create:attach_network'
self._create_server('none')
call_list = [c for c in context_can.call_args_list
if c[0][0] == network_policy]
self.assertEqual(0, len(call_list))
@mock.patch.object(objects.Flavor, 'get_by_flavor_id',
side_effect=exception.FlavorNotFound(flavor_id='2'))
def test_create_server_auto_flavornotfound(self, get_flavor):
"""Tests that requesting auto networking is OK. This test
short-circuits on a FlavorNotFound error.
"""
self.useFixture(nova_fixtures.AllServicesCurrent())
ex = self.assertRaises(
webob.exc.HTTPBadRequest, self._create_server, 'auto')
# make sure it was a flavor not found error and not something else
self.assertIn('Flavor 2 could not be found', six.text_type(ex))
@mock.patch.object(objects.Flavor, 'get_by_flavor_id',
side_effect=exception.FlavorNotFound(flavor_id='2'))
def test_create_server_none_flavornotfound(self, get_flavor):
"""Tests that requesting none for networking is OK. This test
short-circuits on a FlavorNotFound error.
"""
self.useFixture(nova_fixtures.AllServicesCurrent())
ex = self.assertRaises(
webob.exc.HTTPBadRequest, self._create_server, 'none')
# make sure it was a flavor not found error and not something else
self.assertIn('Flavor 2 could not be found', six.text_type(ex))
@mock.patch.object(objects.Flavor, 'get_by_flavor_id',
side_effect=exception.FlavorNotFound(flavor_id='2'))
def test_create_server_multiple_specific_nics_flavornotfound(self,
get_flavor):
"""Tests that requesting multiple specific network IDs is OK. This test
short-circuits on a FlavorNotFound error.
"""
self.useFixture(nova_fixtures.AllServicesCurrent())
ex = self.assertRaises(
webob.exc.HTTPBadRequest, self._create_server,
[{'uuid': 'e3b686a8-b91d-4a61-a3fc-1b74bb619ddb'},
{'uuid': 'e0f00941-f85f-46ec-9315-96ded58c2f14'}])
# make sure it was a flavor not found error and not something else
self.assertIn('Flavor 2 could not be found', six.text_type(ex))
def test_create_server_legacy_neutron_network_id_fails(self):
"""Tests that we no longer support the legacy br-<uuid> format for
a network id.
"""
uuid = 'br-00000000-0000-0000-0000-000000000000'
self.assertRaises(exception.ValidationError, self._create_server,
[{'uuid': uuid}])
@ddt.ddt
class ServersControllerCreateTestV252(test.NoDBTestCase):
def setUp(self):
super(ServersControllerCreateTestV252, self).setUp()
self.controller = servers.ServersController()
self.body = {
'server': {
'name': 'device-tagging-server',
'imageRef': '6b0edabb-8cde-4684-a3f4-978960a51378',
'flavorRef': '2',
'networks': [{
'uuid': 'ff608d40-75e9-48cb-b745-77bb55b5eaf2'
}]
}
}
self.req = fakes.HTTPRequestV21.blank(
'/%s/servers' % fakes.FAKE_PROJECT_ID, version='2.52')
self.req.method = 'POST'
self.req.headers['content-type'] = 'application/json'
def _create_server(self, tags):
self.body['server']['tags'] = tags
self.req.body = jsonutils.dump_as_bytes(self.body)
return self.controller.create(self.req, body=self.body).obj['server']
def test_create_server_with_tags_pre_2_52_fails(self):
"""Negative test to make sure you can't pass 'tags' before 2.52"""
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.51')
self.assertRaises(
exception.ValidationError, self._create_server, ['tag1'])
@ddt.data([','],
['/'],
['a' * (tag.MAX_TAG_LENGTH + 1)],
['a'] * (instance_obj.MAX_TAG_COUNT + 1),
[''],
[1, 2, 3],
{'tag': 'tag'})
def test_create_server_with_tags_incorrect_tags(self, tags):
"""Negative test to incorrect tags are not allowed"""
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.52')
self.assertRaises(
exception.ValidationError, self._create_server, tags)
class ServersControllerCreateTestV257(test.NoDBTestCase):
"""Tests that trying to create a server with personality files using
microversion 2.57 fails.
"""
def test_create_server_with_personality_fails(self):
controller = servers.ServersController()
body = {
'server': {
'name': 'no-personality-files',
'imageRef': '6b0edabb-8cde-4684-a3f4-978960a51378',
'flavorRef': '2',
'networks': 'auto',
'personality': [{
'path': '/path/to/file',
'contents': 'ZWNobyAiaGVsbG8gd29ybGQi'
}]
}
}
req = fakes.HTTPRequestV21.blank('/servers', version='2.57')
req.body = jsonutils.dump_as_bytes(body)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
ex = self.assertRaises(
exception.ValidationError, controller.create, req, body=body)
self.assertIn('personality', six.text_type(ex))
@mock.patch('nova.compute.utils.check_num_instances_quota',
new=lambda *args, **kwargs: 1)
class ServersControllerCreateTestV260(test.NoDBTestCase):
"""Negative tests for creating a server with a multiattach volume."""
def setUp(self):
super(ServersControllerCreateTestV260, self).setUp()
self.useFixture(nova_fixtures.NoopQuotaDriverFixture())
self.controller = servers.ServersController()
get_flavor_mock = mock.patch(
'nova.compute.flavors.get_flavor_by_flavor_id',
return_value=fake_flavor.fake_flavor_obj(
context.get_admin_context(), flavorid='1',
expected_attrs=['extra_specs']))
get_flavor_mock.start()
self.addCleanup(get_flavor_mock.stop)
reqspec_create_mock = mock.patch(
'nova.objects.RequestSpec.create')
reqspec_create_mock.start()
self.addCleanup(reqspec_create_mock.stop)
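        # The volume lookup is stubbed to report multiattach=True, which
        # is what triggers the microversion check exercised below.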
volume_get_mock = mock.patch(
'nova.volume.cinder.API.get',
return_value={'id': uuids.fake_volume_id, 'multiattach': True})
volume_get_mock.start()
self.addCleanup(volume_get_mock.stop)
def _post_server(self, version=None):
body = {
'server': {
'name': 'multiattach',
'flavorRef': '1',
'networks': 'none',
'block_device_mapping_v2': [{
'uuid': uuids.fake_volume_id,
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': True}]
}
}
req = fakes.HTTPRequestV21.blank(
'/servers', version=version or '2.60')
req.body = jsonutils.dump_as_bytes(body)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
return self.controller.create(req, body=body)
def test_create_server_with_multiattach_fails_old_microversion(self):
"""Tests the case that the user tries to boot from volume with a
multiattach volume but before using microversion 2.60.
"""
self.useFixture(nova_fixtures.AllServicesCurrent())
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self._post_server, '2.59')
self.assertIn('Multiattach volumes are only supported starting with '
'compute API version 2.60', six.text_type(ex))
class ServersControllerCreateTestV263(ServersControllerCreateTest):
def _create_instance_req(self, certs=None):
self.body['server']['trusted_image_certificates'] = certs
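        # Turn on image signature verification and certificate validation
        # so that the trusted_image_certificates value is actually
        # exercised rather than ignored.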
self.flags(verify_glance_signatures=True, group='glance')
self.flags(enable_certificate_validation=True, group='glance')
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.63')
def test_create_instance_with_trusted_certs(self):
"""Test create with valid trusted_image_certificates argument"""
self._create_instance_req(
['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8',
'674736e3-f25c-405c-8362-bbf991e0ce0a'])
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, body=self.body).obj
def test_create_instance_without_trusted_certs(self):
"""Test create without trusted image certificates"""
self._create_instance_req()
# The fact that the action doesn't raise is enough validation
self.controller.create(self.req, body=self.body).obj
def test_create_instance_with_empty_trusted_cert_id(self):
"""Make sure we can't create with an empty certificate ID"""
self._create_instance_req([''])
ex = self.assertRaises(
exception.ValidationError, self.controller.create, self.req,
body=self.body)
self.assertIn('is too short', six.text_type(ex))
def test_create_instance_with_empty_trusted_certs(self):
"""Make sure we can't create with an empty array of IDs"""
self.body['server']['trusted_image_certificates'] = []
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.63')
ex = self.assertRaises(
exception.ValidationError, self.controller.create, self.req,
body=self.body)
self.assertIn('is too short', six.text_type(ex))
def test_create_instance_with_too_many_trusted_certs(self):
"""Make sure we can't create with an array of >50 unique IDs"""
self._create_instance_req(['cert{}'.format(i) for i in range(51)])
ex = self.assertRaises(
exception.ValidationError, self.controller.create, self.req,
body=self.body)
self.assertIn('is too long', six.text_type(ex))
def test_create_instance_with_nonunique_trusted_certs(self):
"""Make sure we can't create with a non-unique array of IDs"""
self._create_instance_req(['cert', 'cert'])
ex = self.assertRaises(
exception.ValidationError, self.controller.create, self.req,
body=self.body)
self.assertIn('has non-unique elements', six.text_type(ex))
def test_create_instance_with_invalid_trusted_cert_id(self):
"""Make sure we can't create with non-string certificate IDs"""
self._create_instance_req([1, 2])
ex = self.assertRaises(
exception.ValidationError, self.controller.create, self.req,
body=self.body)
self.assertIn('is not of type', six.text_type(ex))
def test_create_instance_with_invalid_trusted_certs(self):
"""Make sure we can't create with certificates in a non-array"""
self._create_instance_req("not-an-array")
ex = self.assertRaises(
exception.ValidationError, self.controller.create, self.req,
body=self.body)
self.assertIn('is not of type', six.text_type(ex))
def test_create_server_with_trusted_certs_pre_2_63_fails(self):
"""Make sure we can't use trusted_certs before 2.63"""
self._create_instance_req(['trusted-cert-id'])
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.62')
ex = self.assertRaises(
exception.ValidationError, self.controller.create, self.req,
body=self.body)
self.assertIn('Additional properties are not allowed',
six.text_type(ex))
def test_create_server_with_trusted_certs_policy_failed(self):
rule_name = "os_compute_api:servers:create:trusted_certs"
rules = {"os_compute_api:servers:create": "@",
"os_compute_api:servers:create:forced_host": "@",
"os_compute_api:servers:create:attach_volume": "@",
"os_compute_api:servers:create:attach_network": "@",
rule_name: "project:fake"}
self._create_instance_req(['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8'])
self.policy.set_rules(rules)
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create, self.req,
body=self.body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch.object(compute_api.API, 'create')
def test_create_server_with_cert_validation_error(
self, mock_create):
mock_create.side_effect = exception.CertificateValidationFailed(
cert_uuid="cert id", reason="test cert validation error")
self._create_instance_req(['trusted-cert-id'])
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req,
body=self.body)
self.assertIn('test cert validation error',
six.text_type(ex))
class ServersControllerCreateTestV267(ServersControllerCreateTest):
def setUp(self):
super(ServersControllerCreateTestV267, self).setUp()
self.block_device_mapping_v2 = [
{'uuid': '70a599e0-31e7-49b7-b260-868f441e862b',
'source_type': 'image',
'destination_type': 'volume',
'boot_index': 0,
'volume_size': '1',
'volume_type': 'fake-lvm-1'
}]
def _test_create_extra(self, *args, **kwargs):
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.67')
return super(ServersControllerCreateTestV267, self)._test_create_extra(
*args, **kwargs)
def test_create_server_with_trusted_volume_type_pre_2_67_fails(self):
"""Make sure we can't use volume_type before 2.67"""
self.body['server'].update(
{'block_device_mapping_v2': self.block_device_mapping_v2})
        self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.api_version_request = \
api_version_request.APIVersionRequest('2.66')
ex = self.assertRaises(
exception.ValidationError, self.controller.create, self.req,
body=self.body)
self.assertIn("'volume_type' was unexpected", six.text_type(ex))
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.VolumeTypeNotFound(
id_or_name='fake-lvm-1'))
def test_create_instance_with_volume_type_not_found(self, mock_create):
"""Trying to boot from volume with a volume type that does not exist
will result in a 400 error.
"""
params = {'block_device_mapping_v2': self.block_device_mapping_v2}
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
self.assertIn('Volume type fake-lvm-1 could not be found',
six.text_type(ex))
def test_create_instance_with_volume_type_empty_string(self):
"""Test passing volume_type='' which is accepted but not used."""
self.block_device_mapping_v2[0]['volume_type'] = ''
params = {'block_device_mapping_v2': self.block_device_mapping_v2}
self._test_create_extra(params)
def test_create_instance_with_none_volume_type(self):
"""Test passing volume_type=None which is accepted but not used."""
self.block_device_mapping_v2[0]['volume_type'] = None
params = {'block_device_mapping_v2': self.block_device_mapping_v2}
self._test_create_extra(params)
def test_create_instance_without_volume_type(self):
"""Test passing without volume_type which is accepted but not used."""
self.block_device_mapping_v2[0].pop('volume_type')
params = {'block_device_mapping_v2': self.block_device_mapping_v2}
self._test_create_extra(params)
def test_create_instance_with_volume_type_too_long(self):
"""Tests the maxLength schema validation on volume_type."""
self.block_device_mapping_v2[0]['volume_type'] = 'X' * 256
params = {'block_device_mapping_v2': self.block_device_mapping_v2}
ex = self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
self.assertIn('is too long', six.text_type(ex))
class ServersControllerCreateTestV274(ServersControllerCreateTest):
def setUp(self):
super(ServersControllerCreateTestV274, self).setUp()
self.req.environ['nova.context'] = fakes.FakeRequestContext(
user_id='fake_user',
project_id=self.project_id,
is_admin=True)
self.mock_get = self.useFixture(
fixtures.MockPatch('nova.scheduler.client.report.'
'SchedulerReportClient.get')).mock
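    # host and hypervisor_hostname are only accepted in the server-create
    # body starting with microversion 2.74; the pre-2.74 test below
    # asserts they are rejected by schema validation.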
def _generate_req(self, host=None, node=None, az=None,
api_version='2.74'):
if host:
self.body['server']['host'] = host
if node:
self.body['server']['hypervisor_hostname'] = node
if az:
self.body['server']['availability_zone'] = az
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.api_version_request = \
api_version_request.APIVersionRequest(api_version)
def test_create_instance_with_invalid_host(self):
self._generate_req(host='node-invalid')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
self.assertIn('Compute host node-invalid could not be found.',
six.text_type(ex))
def test_create_instance_with_non_string_host(self):
self._generate_req(host=123)
ex = self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
self.assertIn("Invalid input for field/attribute host.",
six.text_type(ex))
def test_create_instance_with_invalid_hypervisor_hostname(self):
get_resp = mock.Mock()
get_resp.status_code = 404
self.mock_get.return_value = get_resp
self._generate_req(node='node-invalid')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
self.assertIn('Compute host node-invalid could not be found.',
six.text_type(ex))
def test_create_instance_with_non_string_hypervisor_hostname(self):
get_resp = mock.Mock()
get_resp.status_code = 404
self.mock_get.return_value = get_resp
self._generate_req(node=123)
ex = self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
self.assertIn("Invalid input for field/attribute hypervisor_hostname.",
six.text_type(ex))
def test_create_instance_with_invalid_host_and_hypervisor_hostname(self):
self._generate_req(host='host-invalid', node='node-invalid')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
self.assertIn('Compute host host-invalid could not be found.',
six.text_type(ex))
def test_create_instance_with_non_string_host_and_hypervisor_hostname(
self):
self._generate_req(host=123, node=123)
ex = self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
self.assertIn("Invalid input for field/attribute",
six.text_type(ex))
def test_create_instance_pre_274(self):
self._generate_req(host='host', node='node', api_version='2.73')
ex = self.assertRaises(exception.ValidationError,
self.controller.create,
self.req, body=self.body)
self.assertIn("Invalid input for field/attribute server.",
six.text_type(ex))
def test_create_instance_mutual(self):
self._generate_req(host='host', node='node', az='nova:host:node')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req, body=self.body)
self.assertIn("mutually exclusive", six.text_type(ex))
def test_create_instance_invalid_policy(self):
self._generate_req(host='host', node='node')
# non-admin
self.req.environ['nova.context'] = fakes.FakeRequestContext(
user_id='fake_user',
project_id=fakes.FAKE_PROJECT_ID,
is_admin=False)
ex = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create,
self.req, body=self.body)
self.assertIn("Policy doesn't allow compute:servers:create:"
"requested_destination to be performed.",
six.text_type(ex))
def test_create_instance_private_flavor(self):
        # This class sets up an admin request context, so the parent
        # class's private-flavor negative test does not apply; override
        # it with a no-op.
pass
class ServersControllerCreateTestWithMock(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTestWithMock, self).setUp()
self.flags(enable_instance_password=True, group='api')
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
self.controller = servers.ServersController()
self.body = {
'server': {
'name': 'server_test',
'imageRef': self.image_uuid,
'flavorRef': self.flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
self.req = fakes.HTTPRequest.blank(
'/%s/servers' % fakes.FAKE_PROJECT_ID)
self.req.method = 'POST'
self.req.headers["content-type"] = "application/json"
def _test_create_extra(self, params, no_image=False):
self.body['server']['flavorRef'] = 2
if no_image:
self.body['server'].pop('imageRef', None)
self.body['server'].update(params)
self.req.body = jsonutils.dump_as_bytes(self.body)
self.req.headers["content-type"] = "application/json"
self.controller.create(self.req, body=self.body).obj['server']
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_fixed_ip_already_in_use(self, create_mock):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.2.3'
requested_networks = [{'uuid': network, 'fixed_ip': address}]
params = {'networks': requested_networks}
create_mock.side_effect = exception.FixedIpAlreadyInUse(
address=address,
instance_uuid=network)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params)
self.assertEqual(1, len(create_mock.call_args_list))
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_invalid_fixed_ip(self, create_mock):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '999.0.2.3'
requested_networks = [{'uuid': network, 'fixed_ip': address}]
params = {'networks': requested_networks}
self.assertRaises(exception.ValidationError,
self._test_create_extra, params)
self.assertFalse(create_mock.called)
@mock.patch.object(compute_api.API, 'create',
side_effect=exception.InvalidVolume(reason='error'))
def test_create_instance_with_invalid_volume_error(self, create_mock):
# Tests that InvalidVolume is translated to a 400 error.
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, {})
class ServersViewBuilderTest(test.TestCase):
project_id = fakes.FAKE_PROJECT_ID
def setUp(self):
super(ServersViewBuilderTest, self).setUp()
fakes.stub_out_nw_api(self)
self.flags(group='glance', api_servers=['http://localhost:9292'])
nw_cache_info = self._generate_nw_cache_info()
db_inst = fakes.stub_instance(
id=1,
image_ref="5",
uuid=FAKE_UUID,
display_name="test_server",
include_fake_metadata=False,
availability_zone='nova',
nw_cache=nw_cache_info,
launched_at=None,
terminated_at=None,
task_state=None,
vm_state=vm_states.ACTIVE,
power_state=1)
fakes.stub_out_secgroup_api(
self, security_groups=[{'name': 'default'}])
self.stub_out('nova.db.api.'
'block_device_mapping_get_all_by_instance_uuids',
fake_bdms_get_all_by_instance_uuids)
self.stub_out('nova.objects.InstanceMappingList.'
'_get_by_instance_uuids_from_db',
fake_get_inst_mappings_by_instance_uuids_from_db)
self.uuid = db_inst['uuid']
self.view_builder = views.servers.ViewBuilder()
self.request = fakes.HTTPRequestV21.blank("/%s" % self.project_id)
self.request.context = context.RequestContext('fake', self.project_id)
self.instance = fake_instance.fake_instance_obj(
self.request.context,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
**db_inst)
self.self_link = "http://localhost/v2/%s/servers/%s" % (
self.project_id, self.uuid)
self.bookmark_link = "http://localhost/%s/servers/%s" % (
self.project_id, self.uuid)
def _generate_nw_cache_info(self):
fixed_ipv4 = ('192.168.1.100', '192.168.2.100', '192.168.3.100')
fixed_ipv6 = ('2001:db8:0:1::1',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
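        # Three VIFs: two ports on network 'test1' (the first carrying
        # both an IPv4 and an IPv6 subnet) and one port on 'test2'. These
        # drive the 'addresses' sections asserted throughout this class.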
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'test1',
'subnets': [{'cidr': '192.168.1.0/24',
'ips': [_ip(fixed_ipv4[0])]},
{'cidr': 'b33f::/64',
'ips': [_ip(fixed_ipv6[0])]}]}},
{'address': 'bb:bb:bb:bb:bb:bb',
'id': 2,
'network': {'bridge': 'br0',
'id': 1,
'label': 'test1',
'subnets': [{'cidr': '192.168.2.0/24',
'ips': [_ip(fixed_ipv4[1])]}]}},
{'address': 'cc:cc:cc:cc:cc:cc',
'id': 3,
'network': {'bridge': 'br0',
'id': 2,
'label': 'test2',
'subnets': [{'cidr': '192.168.3.0/24',
'ips': [_ip(fixed_ipv4[2])]}]}}]
return nw_cache
def test_get_flavor_valid_instance_type(self):
flavor_bookmark = "http://localhost/%s/flavors/1" % self.project_id
expected = {"id": "1",
"links": [{"rel": "bookmark",
"href": flavor_bookmark}]}
result = self.view_builder._get_flavor(self.request, self.instance,
False)
self.assertEqual(result, expected)
@mock.patch('nova.context.scatter_gather_cells')
    def test_get_volumes_attached_with_failing_cells(self, mock_sg):
        bdms = fake_bdms_get_all_by_instance_uuids()
        # Fake a cross-cell listing where one cell returns BDMs and the
        # other raises, as happens when a cell is down.
mock_sg.return_value = {
uuids.cell1: bdms[0],
uuids.cell2: exception.BDMNotFound(id='fake')
}
ctxt = context.RequestContext('fake', fakes.FAKE_PROJECT_ID)
result = self.view_builder._get_instance_bdms_in_multiple_cells(
ctxt, [self.instance.uuid])
# will get the result from cell1
self.assertEqual(result, bdms[0])
mock_sg.assert_called_once()
def test_build_server(self):
expected_server = {
"server": {
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
}
}
output = self.view_builder.basic(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_with_project_id(self):
expected_server = {
"server": {
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
}
}
output = self.view_builder.basic(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail(self):
image_bookmark = "http://localhost/%s/images/5" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/1" % self.project_id
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "ACTIVE",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '192.168.2.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
],
'test2': [
{'version': 4, 'addr': '192.168.3.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": '',
"accessIPv6": '',
"OS-EXT-AZ:availability_zone": "nova",
"config_drive": None,
"OS-EXT-SRV-ATTR:host": None,
"OS-EXT-SRV-ATTR:hypervisor_hostname": None,
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"key_name": '',
"OS-SRV-USG:launched_at": None,
"OS-SRV-USG:terminated_at": None,
"security_groups": [{'name': 'default'}],
"OS-EXT-STS:task_state": None,
"OS-EXT-STS:vm_state": vm_states.ACTIVE,
"OS-EXT-STS:power_state": 1,
"os-extended-volumes:volumes_attached": [
{'id': 'some_volume_1'},
{'id': 'some_volume_2'},
]
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context, self.uuid)
image_bookmark = "http://localhost/%s/images/5" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/1" % self.project_id
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"name": "test_server",
"status": "ERROR",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '192.168.2.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
],
'test2': [
{'version': 4, 'addr': '192.168.3.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
"fault": {
"code": 404,
"created": "2010-10-10T12:00:00Z",
"message": "HTTPNotFound",
"details": "Stock details for test",
},
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": '',
"accessIPv6": '',
"OS-EXT-AZ:availability_zone": "nova",
"config_drive": None,
"OS-EXT-SRV-ATTR:host": None,
"OS-EXT-SRV-ATTR:hypervisor_hostname": None,
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"key_name": '',
"OS-SRV-USG:launched_at": None,
"OS-SRV-USG:terminated_at": None,
"security_groups": [{'name': 'default'}],
"OS-EXT-STS:task_state": None,
"OS-EXT-STS:vm_state": vm_states.ERROR,
"OS-EXT-STS:power_state": 1,
"os-extended-volumes:volumes_attached": [
{'id': 'some_volume_1'},
{'id': 'some_volume_2'},
]
}
}
self.request.context = context.RequestContext('fake', self.project_id)
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault_that_has_been_deleted(self):
self.instance['deleted'] = 1
self.instance['vm_state'] = vm_states.ERROR
fault = fake_instance.fake_fault_obj(self.request.context,
self.uuid, code=500,
message="No valid host was found")
self.instance['fault'] = fault
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "No valid host was found"}
self.request.context = context.RequestContext('fake', self.project_id)
output = self.view_builder.show(self.request, self.instance)
        # Regardless of vm_state, deleted servers should be DELETED.
self.assertEqual("DELETED", output['server']['status'])
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
@mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
def test_build_server_detail_with_fault_no_instance_mapping(self,
mock_im):
self.instance['vm_state'] = vm_states.ERROR
mock_im.side_effect = exception.InstanceMappingNotFound(uuid='foo')
self.request.context = context.RequestContext('fake', self.project_id)
self.view_builder.show(self.request, self.instance)
mock_im.assert_called_once_with(mock.ANY, self.uuid)
@mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
def test_build_server_detail_with_fault_loaded(self, mock_im):
self.instance['vm_state'] = vm_states.ERROR
fault = fake_instance.fake_fault_obj(self.request.context,
self.uuid, code=500,
message="No valid host was found")
self.instance['fault'] = fault
self.request.context = context.RequestContext('fake', self.project_id)
self.view_builder.show(self.request, self.instance)
self.assertFalse(mock_im.called)
def test_build_server_detail_with_fault_no_details_not_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error"}
self.request.context = context.RequestContext('fake', self.project_id)
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error",
'details': 'Stock details for test'}
self.request.environ['nova.context'].is_admin = True
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_no_details_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context,
self.uuid,
code=500,
message='Error',
details='')
expected_fault = {"code": 500,
"created": "2010-10-10T12:00:00Z",
"message": "Error"}
self.request.environ['nova.context'].is_admin = True
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output['server']['fault'],
matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_but_active(self):
self.instance['vm_state'] = vm_states.ACTIVE
self.instance['progress'] = 100
self.instance['fault'] = fake_instance.fake_fault_obj(
self.request.context, self.uuid)
output = self.view_builder.show(self.request, self.instance)
self.assertNotIn('fault', output['server'])
def test_build_server_detail_active_status(self):
# set the power state of the instance to running
self.instance['vm_state'] = vm_states.ACTIVE
self.instance['progress'] = 100
image_bookmark = "http://localhost/%s/images/5" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/1" % self.project_id
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 100,
"name": "test_server",
"status": "ACTIVE",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '192.168.2.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
],
'test2': [
{'version': 4, 'addr': '192.168.3.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
]
},
"metadata": {},
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": '',
"accessIPv6": '',
"OS-EXT-AZ:availability_zone": "nova",
"config_drive": None,
"OS-EXT-SRV-ATTR:host": None,
"OS-EXT-SRV-ATTR:hypervisor_hostname": None,
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"key_name": '',
"OS-SRV-USG:launched_at": None,
"OS-SRV-USG:terminated_at": None,
"security_groups": [{'name': 'default'}],
"OS-EXT-STS:task_state": None,
"OS-EXT-STS:vm_state": vm_states.ACTIVE,
"OS-EXT-STS:power_state": 1,
"os-extended-volumes:volumes_attached": [
{'id': 'some_volume_1'},
{'id': 'some_volume_2'},
]
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_metadata(self):
metadata = []
metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
metadata = nova_utils.metadata_to_dict(metadata)
self.instance['metadata'] = metadata
image_bookmark = "http://localhost/%s/images/5" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/1" % self.project_id
expected_server = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "ACTIVE",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
"id": "1",
"links": [
{
"rel": "bookmark",
"href": flavor_bookmark,
},
],
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '192.168.2.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
],
'test2': [
{'version': 4, 'addr': '192.168.3.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
]
},
"metadata": {"Open": "Stack"},
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": '',
"accessIPv6": '',
"OS-EXT-AZ:availability_zone": "nova",
"config_drive": None,
"OS-EXT-SRV-ATTR:host": None,
"OS-EXT-SRV-ATTR:hypervisor_hostname": None,
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"key_name": '',
"OS-SRV-USG:launched_at": None,
"OS-SRV-USG:terminated_at": None,
"security_groups": [{'name': 'default'}],
"OS-EXT-STS:task_state": None,
"OS-EXT-STS:vm_state": vm_states.ACTIVE,
"OS-EXT-STS:power_state": 1,
"os-extended-volumes:volumes_attached": [
{'id': 'some_volume_1'},
{'id': 'some_volume_2'},
]
}
}
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
class ServersViewBuilderTestV269(ServersViewBuilderTest):
"""Server ViewBuilder test for microversion 2.69
    The intent here is simply to verify that when showing server details
    at microversion 2.69 or later, the response may be missing keys for
    servers that are in down cells.
"""
wsgi_api_version = '2.69'
def setUp(self):
super(ServersViewBuilderTestV269, self).setUp()
self.view_builder = views.servers.ViewBuilder()
self.ctxt = context.RequestContext('fake', self.project_id)
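        # Treat any request between 2.1 and 2.69 as supported so the view
        # builder follows its 2.69 down-cell code paths.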
def fake_is_supported(req, min_version="2.1", max_version="2.69"):
return (fakes.api_version.APIVersionRequest(max_version) >=
req.api_version_request >=
fakes.api_version.APIVersionRequest(min_version))
self.stub_out('nova.api.openstack.api_version_request.is_supported',
fake_is_supported)
def req(self, url, use_admin_context=False):
return fakes.HTTPRequest.blank(url,
use_admin_context=use_admin_context,
version=self.wsgi_api_version)
def test_get_server_list_detail_with_down_cells(self):
        # Fake out one partially constructed instance and one full instance.
self.instances = [
self.instance,
objects.Instance(
context=self.ctxt,
uuid=uuids.fake1,
project_id=fakes.FAKE_PROJECT_ID,
created_at=datetime.datetime(1955, 11, 5)
)
]
req = self.req('/%s/servers/detail' % self.project_id)
output = self.view_builder.detail(req, self.instances, True)
self.assertEqual(2, len(output['servers']))
image_bookmark = "http://localhost/%s/images/5" % self.project_id
expected = {
"servers": [{
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
"name": "test_server",
"status": "ACTIVE",
"hostId": '',
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
'disk': 1,
'ephemeral': 1,
'vcpus': 1,
'ram': 256,
'original_name': 'flavor1',
'extra_specs': {},
'swap': 0
},
"addresses": {
'test1': [
{'version': 4, 'addr': '192.168.1.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 6, 'addr': '2001:db8:0:1::1',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
{'version': 4, 'addr': '192.168.2.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'}
],
'test2': [
{'version': 4, 'addr': '192.168.3.100',
'OS-EXT-IPS:type': 'fixed',
'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'},
]
},
"metadata": {},
"tags": [],
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
"OS-DCF:diskConfig": "MANUAL",
"OS-EXT-SRV-ATTR:root_device_name": None,
"accessIPv4": '',
"accessIPv6": '',
"host_status": '',
"OS-EXT-SRV-ATTR:user_data": None,
"trusted_image_certificates": None,
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:kernel_id": '',
"OS-EXT-SRV-ATTR:reservation_id": '',
"config_drive": None,
"OS-EXT-SRV-ATTR:host": None,
"OS-EXT-SRV-ATTR:hypervisor_hostname": None,
"OS-EXT-SRV-ATTR:hostname": 'test_server',
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"key_name": '',
"locked": False,
"description": None,
"OS-SRV-USG:launched_at": None,
"OS-SRV-USG:terminated_at": None,
"security_groups": [{'name': 'default'}],
"OS-EXT-STS:task_state": None,
"OS-EXT-STS:vm_state": vm_states.ACTIVE,
"OS-EXT-STS:power_state": 1,
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": '',
"os-extended-volumes:volumes_attached": [
{'id': 'some_volume_1', 'delete_on_termination': True},
{'id': 'some_volume_2', 'delete_on_termination': False},
]
},
{
'created': '1955-11-05T00:00:00Z',
'id': uuids.fake1,
'tenant_id': fakes.FAKE_PROJECT_ID,
"status": "UNKNOWN",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/%s/servers/%s" %
(self.project_id, uuids.fake1),
},
{
"rel": "bookmark",
"href": "http://localhost/%s/servers/%s" %
(self.project_id, uuids.fake1),
},
],
}]
}
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_list_with_down_cells(self):
        # Fake out one partially constructed instance and one full instance.
self.instances = [
self.instance,
objects.Instance(
context=self.ctxt,
uuid=uuids.fake1,
project_id=fakes.FAKE_PROJECT_ID,
created_at=datetime.datetime(1955, 11, 5)
)
]
req = self.req('/%s/servers' % self.project_id)
output = self.view_builder.index(req, self.instances, True)
self.assertEqual(2, len(output['servers']))
expected = {
"servers": [{
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
]
},
{
'id': uuids.fake1,
"status": "UNKNOWN",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/%s/servers/%s" %
(self.project_id, uuids.fake1),
},
{
"rel": "bookmark",
"href": "http://localhost/%s/servers/%s" %
(self.project_id, uuids.fake1),
},
],
}]
}
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_with_down_cells(self):
        # Fake out one partially constructed instance.
self.instance = objects.Instance(
context=self.ctxt,
uuid=self.uuid,
project_id=self.instance.project_id,
created_at=datetime.datetime(1955, 11, 5),
user_id=self.instance.user_id,
image_ref=self.instance.image_ref,
power_state=0,
flavor=self.instance.flavor,
availability_zone=self.instance.availability_zone
)
req = self.req('/%s/servers/%s' % (self.project_id, FAKE_UUID))
output = self.view_builder.show(req, self.instance,
cell_down_support=True)
# ten fields from request_spec and instance_mapping
self.assertEqual(10, len(output['server']))
image_bookmark = "http://localhost/%s/images/5" % self.project_id
expected = {
"server": {
"id": self.uuid,
"user_id": "fake_user",
"tenant_id": "fake_project",
"created": '1955-11-05T00:00:00Z',
"status": "UNKNOWN",
"image": {
"id": "5",
"links": [
{
"rel": "bookmark",
"href": image_bookmark,
},
],
},
"flavor": {
'disk': 1,
'ephemeral': 1,
'vcpus': 1,
'ram': 256,
'original_name': 'flavor1',
'extra_specs': {},
'swap': 0
},
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-STS:power_state": 0,
"links": [
{
"rel": "self",
"href": "http://localhost/v2/%s/servers/%s" %
(self.project_id, self.uuid),
},
{
"rel": "bookmark",
"href": "http://localhost/%s/servers/%s" %
(self.project_id, self.uuid),
},
]
}
}
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_without_image_avz_user_id_set_from_down_cells(self):
        # Fake out one partially constructed instance.
self.instance = objects.Instance(
context=self.ctxt,
uuid=self.uuid,
project_id=self.instance.project_id,
created_at=datetime.datetime(1955, 11, 5),
user_id=None,
image_ref=None,
power_state=0,
flavor=self.instance.flavor,
availability_zone=None
)
req = self.req('/%s/servers/%s' % (self.project_id, FAKE_UUID))
output = self.view_builder.show(req, self.instance,
cell_down_support=True)
        # ten fields from request_spec and instance_mapping
self.assertEqual(10, len(output['server']))
expected = {
"server": {
"id": self.uuid,
"user_id": "UNKNOWN",
"tenant_id": "fake_project",
"created": '1955-11-05T00:00:00Z',
"status": "UNKNOWN",
"image": "",
"flavor": {
'disk': 1,
'ephemeral': 1,
'vcpus': 1,
'ram': 256,
'original_name': 'flavor1',
'extra_specs': {},
'swap': 0
},
"OS-EXT-AZ:availability_zone": "UNKNOWN",
"OS-EXT-STS:power_state": 0,
"links": [
{
"rel": "self",
"href": "http://localhost/v2/%s/servers/%s" %
(self.project_id, self.uuid),
},
{
"rel": "bookmark",
"href": "http://localhost/%s/servers/%s" %
(self.project_id, self.uuid),
},
]
}
}
self.assertThat(output, matchers.DictMatches(expected))
class ServersAllExtensionsTestCase(test.TestCase):
"""Servers tests using default API router with all extensions enabled.
    The intent here is to catch cases where extensions end up throwing
    an exception because of a malformed request before the core API
    gets a chance to validate the request and return a 400 response.
For example, AccessIPsController extends servers.Controller::
| @wsgi.extends
| def create(self, req, resp_obj, body):
| context = req.environ['nova.context']
| if authorize(context) and 'server' in resp_obj.obj:
| resp_obj.attach(xml=AccessIPTemplate())
| server = resp_obj.obj['server']
| self._extend_server(req, server)
    We want to ensure that the extension isn't barfing on an invalid
    body.
"""
def setUp(self):
super(ServersAllExtensionsTestCase, self).setUp()
self.app = compute.APIRouterV21()
@mock.patch.object(compute_api.API, 'create',
side_effect=test.TestingException(
"Should not reach the compute API."))
def test_create_missing_server(self, mock_create):
# Test create with malformed body.
req = fakes.HTTPRequestV21.blank(
'/%s/servers' % fakes.FAKE_PROJECT_ID)
req.method = 'POST'
req.content_type = 'application/json'
body = {'foo': {'a': 'b'}}
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
def test_update_missing_server(self):
# Test update with malformed body.
req = fakes.HTTPRequestV21.blank(
'/%s/servers/1' % fakes.FAKE_PROJECT_ID)
req.method = 'PUT'
req.content_type = 'application/json'
body = {'foo': {'a': 'b'}}
req.body = jsonutils.dump_as_bytes(body)
with mock.patch('nova.objects.Instance.save') as mock_save:
res = req.get_response(self.app)
self.assertFalse(mock_save.called)
self.assertEqual(400, res.status_int)
class ServersInvalidRequestTestCase(test.TestCase):
"""Tests of places we throw 400 Bad Request from."""
def setUp(self):
super(ServersInvalidRequestTestCase, self).setUp()
self.controller = servers.ServersController()
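    # create and update are schema-validated, so each malformed body below
    # should fail with ValidationError before reaching the compute API.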
def _invalid_server_create(self, body):
req = fakes.HTTPRequestV21.blank(
'/%s/servers' % fakes.FAKE_PROJECT_ID)
req.method = 'POST'
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_server_no_body(self):
self._invalid_server_create(body=None)
def test_create_server_missing_server(self):
body = {'foo': {'a': 'b'}}
self._invalid_server_create(body=body)
def test_create_server_malformed_entity(self):
body = {'server': 'string'}
self._invalid_server_create(body=body)
    def _invalid_server_update(self, body):
        req = fakes.HTTPRequestV21.blank(
            '/%s/servers/%s' % (fakes.FAKE_PROJECT_ID, FAKE_UUID))
        req.method = 'PUT'
        self.assertRaises(exception.ValidationError,
                          self.controller.update, req, FAKE_UUID, body=body)
    def test_update_server_no_body(self):
        self._invalid_server_update(body=None)
    def test_update_server_missing_server(self):
        body = {'foo': {'a': 'b'}}
        self._invalid_server_update(body=body)
    def test_update_server_malformed_entity(self):
        body = {'server': 'string'}
        self._invalid_server_update(body=body)
# TODO(alex_xu): There isn't a dedicated file for the ips extension; most
# of the unit tests related to it live in this file. So keep the ips policy
# enforcement tests here until a dedicated file exists.
class IPsPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(IPsPolicyEnforcementV21, self).setUp()
self.controller = ips.IPsController()
self.req = fakes.HTTPRequest.blank("/v2/%s" % fakes.FAKE_PROJECT_ID)
def test_index_policy_failed(self):
rule_name = "os_compute_api:ips:index"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_show_policy_failed(self):
rule_name = "os_compute_api:ips:show"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class ServersPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(ServersPolicyEnforcementV21, self).setUp()
self.useFixture(nova_fixtures.AllServicesCurrent())
self.controller = servers.ServersController()
self.req = fakes.HTTPRequest.blank('')
self.image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
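    # Shared pattern for the tests below: install restrictive rules,
    # invoke the controller method, and assert PolicyNotAuthorized with
    # the exact message for the rule under test.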
def _common_policy_check(self, rules, rule_name, func, *arg, **kwarg):
self.policy.set_rules(rules)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch.object(servers.ServersController, '_get_instance')
def test_start_policy_failed(self, _get_instance_mock):
_get_instance_mock.return_value = None
rule_name = "os_compute_api:servers:start"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller._start_server,
self.req, FAKE_UUID, body={})
@mock.patch.object(servers.ServersController, '_get_instance')
def test_trigger_crash_dump_policy_failed_with_other_project(
self, _get_instance_mock):
_get_instance_mock.return_value = fake_instance.fake_instance_obj(
self.req.environ['nova.context'])
rule_name = "os_compute_api:servers:trigger_crash_dump"
rule = {rule_name: "project_id:%(project_id)s"}
        self.req.api_version_request = \
            api_version_request.APIVersionRequest('2.17')
# Change the project_id in request context.
self.req.environ['nova.context'].project_id = 'other-project'
self._common_policy_check(
rule, rule_name, self.controller._action_trigger_crash_dump,
self.req, FAKE_UUID, body={'trigger_crash_dump': None})
@mock.patch('nova.compute.api.API.trigger_crash_dump')
@mock.patch.object(servers.ServersController, '_get_instance')
def test_trigger_crash_dump_overridden_policy_pass_with_same_project(
self, _get_instance_mock, trigger_crash_dump_mock):
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
project_id=self.req.environ['nova.context'].project_id)
_get_instance_mock.return_value = instance
rule_name = "os_compute_api:servers:trigger_crash_dump"
self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
self.req.api_version_request = (
api_version_request.APIVersionRequest('2.17'))
self.controller._action_trigger_crash_dump(
self.req, fakes.FAKE_UUID, body={'trigger_crash_dump': None})
trigger_crash_dump_mock.assert_called_once_with(
self.req.environ['nova.context'], instance)
@mock.patch.object(servers.ServersController, '_get_instance')
def test_trigger_crash_dump_overridden_policy_failed_with_other_user(
self, _get_instance_mock):
_get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
rule_name = "os_compute_api:servers:trigger_crash_dump"
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
# Change the user_id in request context.
self.req.environ['nova.context'].user_id = 'other-user'
self.req.api_version_request = (
api_version_request.APIVersionRequest('2.17'))
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._action_trigger_crash_dump,
self.req,
fakes.FAKE_UUID,
body={'trigger_crash_dump': None})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.compute.api.API.trigger_crash_dump')
@mock.patch.object(servers.ServersController, '_get_instance')
def test_trigger_crash_dump_overridden_policy_pass_with_same_user(
self, _get_instance_mock, trigger_crash_dump_mock):
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
user_id=self.req.environ['nova.context'].user_id)
_get_instance_mock.return_value = instance
rule_name = "os_compute_api:servers:trigger_crash_dump"
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
self.req.api_version_request = (
api_version_request.APIVersionRequest('2.17'))
self.controller._action_trigger_crash_dump(
self.req, fakes.FAKE_UUID, body={'trigger_crash_dump': None})
trigger_crash_dump_mock.assert_called_once_with(
self.req.environ['nova.context'], instance)
def test_index_policy_failed(self):
rule_name = "os_compute_api:servers:index"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller.index, self.req)
def test_detail_policy_failed(self):
rule_name = "os_compute_api:servers:detail"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller.detail, self.req)
def test_detail_get_tenants_policy_failed(self):
req = fakes.HTTPRequest.blank('')
req.GET["all_tenants"] = "True"
rule_name = "os_compute_api:servers:detail:get_all_tenants"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller._get_servers, req, True)
def test_index_get_tenants_policy_failed(self):
req = fakes.HTTPRequest.blank('')
req.GET["all_tenants"] = "True"
rule_name = "os_compute_api:servers:index:get_all_tenants"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller._get_servers, req, False)
@mock.patch.object(common, 'get_instance')
def test_show_policy_failed(self, get_instance_mock):
get_instance_mock.return_value = None
rule_name = "os_compute_api:servers:show"
rule = {rule_name: "project:non_fake"}
self._common_policy_check(
rule, rule_name, self.controller.show, self.req, FAKE_UUID)
@mock.patch.object(common, 'get_instance')
def test_delete_policy_failed_with_other_project(self, get_instance_mock):
get_instance_mock.return_value = fake_instance.fake_instance_obj(
self.req.environ['nova.context'])
rule_name = "os_compute_api:servers:delete"
rule = {rule_name: "project_id:%(project_id)s"}
# Change the project_id in request context.
self.req.environ['nova.context'].project_id = 'other-project'
self._common_policy_check(
rule, rule_name, self.controller.delete, self.req, FAKE_UUID)
@mock.patch('nova.compute.api.API.soft_delete')
@mock.patch('nova.api.openstack.common.get_instance')
def test_delete_overridden_policy_pass_with_same_project(self,
get_instance_mock,
soft_delete_mock):
self.flags(reclaim_instance_interval=3600)
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
project_id=self.req.environ['nova.context'].project_id)
get_instance_mock.return_value = instance
rule_name = "os_compute_api:servers:delete"
self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
self.controller.delete(self.req, fakes.FAKE_UUID)
soft_delete_mock.assert_called_once_with(
self.req.environ['nova.context'], instance)
@mock.patch('nova.api.openstack.common.get_instance')
def test_delete_overridden_policy_failed_with_other_user_in_same_project(
self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
rule_name = "os_compute_api:servers:delete"
rule = {rule_name: "user_id:%(user_id)s"}
# Change the user_id in request context.
self.req.environ['nova.context'].user_id = 'other-user'
self._common_policy_check(
rule, rule_name, self.controller.delete, self.req, FAKE_UUID)
@mock.patch('nova.compute.api.API.soft_delete')
@mock.patch('nova.api.openstack.common.get_instance')
def test_delete_overridden_policy_pass_with_same_user(self,
get_instance_mock,
soft_delete_mock):
self.flags(reclaim_instance_interval=3600)
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
user_id=self.req.environ['nova.context'].user_id)
get_instance_mock.return_value = instance
rule_name = "os_compute_api:servers:delete"
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
self.controller.delete(self.req, fakes.FAKE_UUID)
soft_delete_mock.assert_called_once_with(
self.req.environ['nova.context'], instance)
@mock.patch.object(common, 'get_instance')
def test_update_policy_failed_with_other_project(self, get_instance_mock):
get_instance_mock.return_value = fake_instance.fake_instance_obj(
self.req.environ['nova.context'])
rule_name = "os_compute_api:servers:update"
rule = {rule_name: "project_id:%(project_id)s"}
body = {'server': {'name': 'server_test'}}
# Change the project_id in request context.
self.req.environ['nova.context'].project_id = 'other-project'
self._common_policy_check(
rule, rule_name, self.controller.update, self.req,
FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.compute.views.servers.ViewBuilder.show')
@mock.patch.object(compute_api.API, 'update_instance')
@mock.patch.object(common, 'get_instance')
def test_update_overridden_policy_pass_with_same_project(
self, get_instance_mock, update_instance_mock, view_show_mock):
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
project_id=self.req.environ['nova.context'].project_id)
get_instance_mock.return_value = instance
rule_name = "os_compute_api:servers:update"
self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
body = {'server': {'name': 'server_test'}}
self.controller.update(self.req, fakes.FAKE_UUID, body=body)
@mock.patch.object(common, 'get_instance')
def test_update_overridden_policy_failed_with_other_user_in_same_project(
self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
rule_name = "os_compute_api:servers:update"
rule = {rule_name: "user_id:%(user_id)s"}
# Change the user_id in request context.
self.req.environ['nova.context'].user_id = 'other-user'
body = {'server': {'name': 'server_test'}}
self._common_policy_check(
rule, rule_name, self.controller.update, self.req,
FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.common.'
'instance_has_port_with_resource_request', return_value=False)
@mock.patch('nova.api.openstack.compute.views.servers.ViewBuilder.show')
@mock.patch.object(compute_api.API, 'update_instance')
@mock.patch.object(common, 'get_instance')
def test_update_overridden_policy_pass_with_same_user(self,
get_instance_mock,
update_instance_mock,
view_show_mock,
mock_port_check):
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
user_id=self.req.environ['nova.context'].user_id)
get_instance_mock.return_value = instance
rule_name = "os_compute_api:servers:update"
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
body = {'server': {'name': 'server_test'}}
self.controller.update(self.req, fakes.FAKE_UUID, body=body)
def test_confirm_resize_policy_failed(self):
rule_name = "os_compute_api:servers:confirm_resize"
rule = {rule_name: "project:non_fake"}
body = {'server': {'name': 'server_test'}}
self._common_policy_check(
rule, rule_name, self.controller._action_confirm_resize,
self.req, FAKE_UUID, body=body)
def test_revert_resize_policy_failed(self):
rule_name = "os_compute_api:servers:revert_resize"
rule = {rule_name: "project:non_fake"}
body = {'server': {'name': 'server_test'}}
self._common_policy_check(
rule, rule_name, self.controller._action_revert_resize,
self.req, FAKE_UUID, body=body)
def test_reboot_policy_failed(self):
rule_name = "os_compute_api:servers:reboot"
rule = {rule_name: "project:non_fake"}
body = {'reboot': {'type': 'HARD'}}
self._common_policy_check(
rule, rule_name, self.controller._action_reboot,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.common.get_instance')
def test_resize_policy_failed_with_other_project(self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
rule_name = "os_compute_api:servers:resize"
rule = {rule_name: "project_id:%(project_id)s"}
body = {'resize': {'flavorRef': '1'}}
# Change the project_id in request context.
self.req.environ['nova.context'].project_id = 'other-project'
self._common_policy_check(
rule, rule_name, self.controller._action_resize, self.req,
FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.common.'
'instance_has_port_with_resource_request', return_value=False)
@mock.patch('nova.compute.api.API.resize')
@mock.patch('nova.api.openstack.common.get_instance')
def test_resize_overridden_policy_pass_with_same_project(self,
get_instance_mock,
resize_mock,
mock_post_check):
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
project_id=self.req.environ['nova.context'].project_id)
get_instance_mock.return_value = instance
rule_name = "os_compute_api:servers:resize"
self.policy.set_rules({rule_name: "project_id:%(project_id)s"})
body = {'resize': {'flavorRef': '1'}}
self.controller._action_resize(self.req, fakes.FAKE_UUID, body=body)
resize_mock.assert_called_once_with(self.req.environ['nova.context'],
instance, '1',
auto_disk_config=None)
@mock.patch('nova.api.openstack.common.get_instance')
def test_resize_overridden_policy_failed_with_other_user_in_same_project(
self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
rule_name = "os_compute_api:servers:resize"
rule = {rule_name: "user_id:%(user_id)s"}
# Change the user_id in request context.
self.req.environ['nova.context'].user_id = 'other-user'
body = {'resize': {'flavorRef': '1'}}
self._common_policy_check(
rule, rule_name, self.controller._action_resize, self.req,
FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.common.'
'instance_has_port_with_resource_request', return_value=False)
@mock.patch('nova.compute.api.API.resize')
@mock.patch('nova.api.openstack.common.get_instance')
def test_resize_overridden_policy_pass_with_same_user(self,
get_instance_mock,
resize_mock,
mock_port_check):
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
user_id=self.req.environ['nova.context'].user_id)
get_instance_mock.return_value = instance
rule_name = "os_compute_api:servers:resize"
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
body = {'resize': {'flavorRef': '1'}}
self.controller._action_resize(self.req, fakes.FAKE_UUID, body=body)
resize_mock.assert_called_once_with(self.req.environ['nova.context'],
instance, '1',
auto_disk_config=None)
@mock.patch('nova.api.openstack.common.get_instance')
def test_rebuild_policy_failed_with_other_project(self, get_instance_mock):
get_instance_mock.return_value = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
project_id=self.req.environ['nova.context'].project_id)
rule_name = "os_compute_api:servers:rebuild"
rule = {rule_name: "project_id:%(project_id)s"}
body = {'rebuild': {'imageRef': self.image_uuid}}
# Change the project_id in request context.
self.req.environ['nova.context'].project_id = 'other-project'
self._common_policy_check(
rule, rule_name, self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.common.get_instance')
def test_rebuild_overridden_policy_failed_with_other_user_in_same_project(
self, get_instance_mock):
get_instance_mock.return_value = (
fake_instance.fake_instance_obj(self.req.environ['nova.context']))
rule_name = "os_compute_api:servers:rebuild"
rule = {rule_name: "user_id:%(user_id)s"}
body = {'rebuild': {'imageRef': self.image_uuid}}
# Change the user_id in request context.
self.req.environ['nova.context'].user_id = 'other-user'
self._common_policy_check(
rule, rule_name, self.controller._action_rebuild,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.api.openstack.compute.views.servers.ViewBuilder.show')
@mock.patch('nova.compute.api.API.rebuild')
@mock.patch('nova.api.openstack.common.get_instance')
def test_rebuild_overridden_policy_pass_with_same_user(self,
get_instance_mock,
rebuild_mock,
view_show_mock):
instance = fake_instance.fake_instance_obj(
self.req.environ['nova.context'],
user_id=self.req.environ['nova.context'].user_id)
get_instance_mock.return_value = instance
rule_name = "os_compute_api:servers:rebuild"
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
body = {'rebuild': {'imageRef': self.image_uuid,
'adminPass': 'dumpy_password'}}
self.controller._action_rebuild(self.req, fakes.FAKE_UUID, body=body)
rebuild_mock.assert_called_once_with(self.req.environ['nova.context'],
instance,
self.image_uuid,
'dumpy_password')
def test_create_image_policy_failed(self):
rule_name = "os_compute_api:servers:create_image"
rule = {rule_name: "project:non_fake"}
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
self._common_policy_check(
rule, rule_name, self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(servers.ServersController, '_get_server')
def test_create_vol_backed_img_snapshotting_policy_blocks_project(self,
mock_get_server,
mock_get_uuidi,
mock_is_vol_back):
"""Don't permit a snapshot of a volume backed instance if configured
not to based on project
"""
rule_name = "os_compute_api:servers:create_image:allow_volume_backed"
rules = {
rule_name: "project:non_fake",
"os_compute_api:servers:create_image": "",
}
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
self._common_policy_check(
rules, rule_name, self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(servers.ServersController, '_get_server')
def test_create_vol_backed_img_snapshotting_policy_blocks_role(self,
mock_get_server,
mock_get_uuidi,
mock_is_vol_back):
"""Don't permit a snapshot of a volume backed instance if configured
not to based on role
"""
rule_name = "os_compute_api:servers:create_image:allow_volume_backed"
rules = {
rule_name: "role:non_fake",
"os_compute_api:servers:create_image": "",
}
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
self._common_policy_check(
rules, rule_name, self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
def _create_policy_check(self, rules, rule_name):
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
'name': 'server_test',
'imageRef': self.image_uuid,
'flavorRef': flavor_ref,
'availability_zone': "zone1:host1:node1",
'block_device_mapping': [{'device_name': "/dev/sda1"}],
'networks': [{'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}],
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
self._common_policy_check(
rules, rule_name, self.controller.create, self.req, body=body)
def test_create_policy_failed(self):
rule_name = "os_compute_api:servers:create"
rules = {rule_name: "project:non_fake"}
self._create_policy_check(rules, rule_name)
def test_create_forced_host_policy_failed(self):
rule_name = "os_compute_api:servers:create:forced_host"
rule = {"os_compute_api:servers:create": "@",
rule_name: "project:non_fake"}
self._create_policy_check(rule, rule_name)
def test_create_attach_volume_policy_failed(self):
rule_name = "os_compute_api:servers:create:attach_volume"
rules = {"os_compute_api:servers:create": "@",
"os_compute_api:servers:create:forced_host": "@",
rule_name: "project:non_fake"}
self._create_policy_check(rules, rule_name)
def test_create_attach_attach_network_policy_failed(self):
rule_name = "os_compute_api:servers:create:attach_network"
rules = {"os_compute_api:servers:create": "@",
"os_compute_api:servers:create:forced_host": "@",
"os_compute_api:servers:create:attach_volume": "@",
rule_name: "project:non_fake"}
self._create_policy_check(rules, rule_name)
class ServersActionsJsonTestV239(test.NoDBTestCase):
def setUp(self):
super(ServersActionsJsonTestV239, self).setUp()
self.controller = servers.ServersController()
self.req = fakes.HTTPRequest.blank('', version='2.39')
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(common, 'get_instance')
def test_server_create_image_no_quota_checks(self, mock_get_instance,
mock_check_quotas):
        # 'mock_get_instance' is made to raise HTTPNotFound so the whole logic
        # of the action is skipped; the test only needs to verify that no
        # quota check is performed along the way.
mock_get_instance.side_effect = webob.exc.HTTPNotFound
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_create_image, self.req,
FAKE_UUID, body=body)
        # starting from microversion 2.39, no quota checks are performed on the
        # Nova side for the 'createImage' action, after the removal of the
        # 'image-metadata' proxy API
mock_check_quotas.assert_not_called()
|
apache-2.0
| 1,245,279,963,337,041,000
| 43.045867
| 79
| 0.557206
| false
| 3.939177
| true
| false
| false
|
hansehe/Wind-Blade-Inspection
|
TestUnits/Test_src/Test_DroneVision/Test_DroneVision_src/Test_imgProcessing/Test_Heading/Test_EdgeHeading.py
|
1
|
4905
|
'''
Author: Hans Erik Heggem
Email: hans.erik.heggem@gmail.com
Project: Master's Thesis - Autonomous Inspection Of Wind Blades
Repository: Master's Thesis - CV (Computer Vision)
'''
################### UNIT TEST ########################
import unittest
from Settings.TestData import TestData
from TestUnits.Test_main import Test_main
'''
@brief Test unit for EdgeHeading
'''
class Test_EdgeHeading(unittest.TestCase, Test_main, TestData):
def setUp(self):
'''
		 @brief Provide all setup for the unit test.
'''
self.SetAllKey()
self.InitTestData()
#### IMPORTS #####
from Settings import Settings
from src.DroneVision.DroneVision_src.imgProcessing.Heading import EdgeHeading
from src.DroneVision.DroneVision_src.imgProcessing.featureDetection.PointDetection import PointDetection
self.Settings = Settings
self.EdgeHeading = EdgeHeading
self.PointDetection = PointDetection
##################
def tearDown(self):
'''
		 @brief Give all tear-down steps.
		 	Runs even if the test fails.
'''
pass
def test_EdgeHeading(self):
'''
@brief Main start test function.
Append functions to test for this unit.
'''
###### START TEST #####
for folder, left_frames, right_frames, actual_distances, baselines, use_set in self.GetFrameSets():
if use_set:
for fn_frame, fn_slframe in left_frames:
self.TestEdgeHeading(folder, fn_frame, fn_slframe)
###########################
def TestEdgeHeading(self, folder, fn_frame, fn_slframe):
'''
@brief Test function for EdgeHeading unit.
@param folder Input folder
@param fn_frame Frame filename without points.
@param fn_slframe Frame filename with points.
'''
import timeit
import numpy as np
from src.DroneVision.DroneVision_src.hardware.imageTools import GetImage, MatplotShow
print '\n'
print '#----------- TESTING EDGE HEADING PROCESSING \t---------------#'
print '#----------- Image without points: {0} \t---------------#'.format(fn_frame)
print '#----------- Image with points: {0} \t---------------#'.format(fn_slframe)
settings_inst = self.Settings.Settings()
fn_frame = folder + fn_frame
fn_slframe = folder + fn_slframe
delay = timeit.default_timer()
frame = GetImage(fn_frame)
sl_frame = GetImage(fn_slframe)
print 'Delay reading images: {0} sec'.format(timeit.default_timer() - delay)
edgeHeading = self.EdgeHeading.EdgeHeading()
pointDet = self.PointDetection.PointDetection(True, settings_inst.GetSettings())
pointDet.CalibratePointDetection()
print 'Min distance between blobs: {0}'.format(pointDet.GetMinDistanceBetweenBlobs())
total_delay = timeit.default_timer()
delay = timeit.default_timer()
delta_frame, point_kp, blob_desc, frame_un, sl_frame_un = pointDet.GetPointList(frame, sl_frame, draw=True)
print 'Delay for blob point detection: {0} sec, detected blobs: {1}'.format(timeit.default_timer() - delay, len(point_kp))
delay = timeit.default_timer()
hough_frame, edgel_map_filtered, boundary_hough_lines = pointDet.GetBoundaryHoughLines(frame_un, delta_frame, point_kp, draw=True, print_hough_positions=True)
print 'Delay for finding boundary edges (filtered) + lines: {0} sec'.format(timeit.default_timer() - delay)
delay = timeit.default_timer()
selected_hor_edge_heading, selected_vert_edge_heading, possible_hor_edge_headings, possible_vert_edge_headings = edgeHeading.ComputeEdgeHeading(edgel_map_filtered, boundary_hough_lines, draw=False)
print 'Delay for finding edge heading angle: {0} sec, hor_edge_heading = {1}, vert_edge_heading = {2}'.format(timeit.default_timer() - delay, selected_hor_edge_heading, selected_vert_edge_heading)
timeout = timeit.default_timer() - total_delay
print 'Total delay for downscaling + undistort + blob + hough lines + bounded lines + edge heading: {0} sec'.format(timeout)
edgel_map_filtered_all_headings = np.array(edgel_map_filtered, dtype=edgel_map_filtered.dtype)
selected_hor_edge_heading, selected_vert_edge_heading, possible_hor_edge_headings, possible_vert_edge_headings, edgel_map_filtered_all_headings = edgeHeading.ComputeEdgeHeading(edgel_map_filtered_all_headings, boundary_hough_lines, draw=True)
touple_frames = []
#touple_frames.append(('SL frame', sl_frame))
#touple_frames.append(('SL undistorted', sl_frame_un))
#touple_frames.append(('Original points', delta_frame))
#touple_frames.append(('Hough lines', hough_frame))
#touple_frames.append(('Selected edge heading', edgel_map_filtered))
touple_frames.append(('Possible edge headings', edgel_map_filtered_all_headings))
print 'max_hor = BLUE, min_hor = RED, max_vert = PURPLE, min_vert = GREEN'
if not(self.CheckAllTests()):
MatplotShow(touple_frames, fn_frame+'_Edge_heading_test', savefig_folder=self.savefig_folder+'edge_heading_test/', save_fig=self.save_figs, save_fig_only=self.save_figs_only, inlude_main_title_in_plot=False)
|
mit
| 4,514,450,617,151,839,000
| 42.035088
| 244
| 0.707441
| false
| 3.199609
| true
| false
| false
|
lddias/python-avs
|
debug.py
|
1
|
1030
|
import time
def fake_mic(logger, q, mic_stopped):
time.sleep(60)
logger.debug("TRIGGERED")
class StoppableAudioStreamLike:
def __init__(self, file):
self._f = file
self._eof = False
self._last_byte = None
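        # A note on the behaviour below (my reading of the code): read() returns
        # exactly 'size' bytes, or b'' once a mic stop is requested; after the
        # wrapped file hits EOF, the last byte seen is repeated as padding so
        # downstream consumers always get a full buffer.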
def read(self, size=-1):
if mic_stopped.is_set():
logger.info("MIC STOP REQUESTED")
mic_stopped.clear()
return b''
if self._eof:
ret = self._last_byte
else:
ret = self._f.read(size)
if len(ret) < size:
self._last_byte = ret[-1:]
self._eof = True
ret += ret[-1:] * (size - len(ret))
assert len(ret) == size
return ret
q.put(('hotword', StoppableAudioStreamLike(open('flashbriefing2.wav', 'rb')), mic_stopped))
def fake_mic2(logger, q, mic_stopped):
time.sleep(3)
logger.debug("TRIGGERED")
q.put(('hotword', open('timer.wav', 'rb'), None))
|
mit
| -3,517,990,759,981,812,000
| 27.611111
| 95
| 0.495146
| false
| 3.705036
| false
| false
| false
|
tristandb/CaDaBata
|
cadabata.py
|
1
|
1545
|
from eca import *
from eca.generators import start_offline_tweets
import datetime
@event('init')
def setup(ctx, e):
'''The code that will be executed at initialization: starting the offline tweet stream.'''
start_offline_tweets('cadabata_static/batatweets.txt', 'tweet', time_factor=100000, arff_file='classifiers/bata_2014_classifier.arff')
@event('tweet')
def tweet(ctx, e):
    '''The code that will be executed when a tweet is received.'''
    # The tweet data.
    tweet = e.data
    # Rename the classification of the tweet.
    tweetclass = classify_tweet(tweet['extra']['class_predicted_by: NaiveBayes'])
# Parse the time and date of the tweet. This has to be done with '{}'.format(), otherwise
# it can't be JSON encoded.
time = '{}'.format(datetime.datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S %z %Y'))
# Print to the console, so we know something is happening.
print('Tweet classified (and emitted) as:',tweetclass)
# Emit to the right handler.
emit('tweet_'+tweetclass, e.data)
# Emit to the graph.
emit('tweet_flot', {
'action': 'add',
'series': tweetclass,
'time': time,
'value': 1
    })
def classify_tweet(cls):
'''Rename the classifications according to cls. Default is neutral.'''
o = 'neutral'
if cls == 'T':
o = 'positive'
elif cls == 'N':
o = 'neutral'
elif cls == 'F':
o = 'negative'
elif cls == 'A':
o = 'alert'
return o
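# e.g. classify_tweet('T') == 'positive'; any unrecognized code falls back to 'neutral'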
|
mit
| 3,844,286,205,517,304,000
| 27.090909
| 138
| 0.604531
| false
| 3.584687
| false
| false
| false
|
jeffposnick/chromium-dashboard
|
common.py
|
1
|
6649
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'ericbidelman@chromium.org (Eric Bidelman)'
import datetime
import json
import logging
import webapp2
# App Engine imports.
from google.appengine.api import users
from django.template.loader import render_to_string
from django.utils import feedgenerator
import models
import settings
class BaseHandler(webapp2.RequestHandler):
def __init__(self, request, response):
self.initialize(request, response)
# Add CORS and Chrome Frame to all responses.
self.response.headers.add_header('Access-Control-Allow-Origin', '*')
self.response.headers.add_header('X-UA-Compatible', 'IE=Edge,chrome=1')
# Settings can't be global in python 2.7 env.
logging.getLogger().setLevel(logging.DEBUG)
class JSONHandler(BaseHandler):
def __truncate_day_percentage(self, data):
    # Need 6 decimals b/c num will be multiplied by 100 to get a percentage.
data.day_percentage = float("%.*f" % (6, data.day_percentage))
return data
def _is_googler(self, user):
return user and user.email().endswith('@google.com')
def _clean_data(self, data):
user = users.get_current_user()
# Show raw day percentage numbers if user is a googler.
if not self._is_googler(user):
data = map(self.__truncate_day_percentage, data)
return data
def get(self, data, formatted=False, public=True):
cache_type = 'public'
if not public:
cache_type = 'private'
# Cache script generated json responses.
self.response.headers['Cache-Control'] = '%s, max-age=%s' % (
cache_type, settings.DEFAULT_CACHE_TIME)
self.response.headers['Content-Type'] = 'application/json;charset=utf-8'
if formatted:
return self.response.write(json.dumps(data, separators=(',',':')))
else:
data = [entity.to_dict() for entity in data]
return self.response.write(json.dumps(data, separators=(',',':')))
class ContentHandler(BaseHandler):
def _is_user_whitelisted(self, user):
if not user:
return False
is_whitelisted = False
if users.is_current_user_admin():
is_whitelisted = True
elif user.email().endswith('@chromium.org'):
is_whitelisted = True
else:
# TODO(ericbidelman): memcache user lookup.
query = models.AppUser.all(keys_only=True).filter('email =', user.email())
found_user = query.get()
if found_user is not None:
is_whitelisted = True
return is_whitelisted
def _add_common_template_values(self, d):
"""Mixin common values for templates into d."""
template_data = {
'prod': settings.PROD,
'APP_TITLE': settings.APP_TITLE,
'current_path': self.request.path,
'VULCANIZE': settings.VULCANIZE
}
user = users.get_current_user()
if user:
template_data['login'] = (
'Logout', users.create_logout_url(dest_url=self.request.path))
template_data['user'] = {
'is_whitelisted': self._is_user_whitelisted(user),
'is_admin': users.is_current_user_admin(),
'email': user.email(),
}
else:
template_data['user'] = None
template_data['login'] = (
'Login', users.create_login_url(dest_url=self.request.path))
d.update(template_data)
def render(self, data={}, template_path=None, status=None, message=None,
relpath=None):
if status is not None and status != 200:
self.response.set_status(status, message)
# Add common template data to every request.
self._add_common_template_values(data)
try:
self.response.out.write(render_to_string(template_path, data))
except Exception:
handle_404(self.request, self.response, Exception)
def render_atom_feed(self, title, data):
features_url = '%s://%s%s' % (self.request.scheme,
self.request.host,
self.request.path.replace('.xml', ''))
feature_url_prefix = '%s://%s%s' % (self.request.scheme,
self.request.host,
'/feature')
feed = feedgenerator.Atom1Feed(
title=unicode('%s - %s' % (settings.APP_TITLE, title)),
link=features_url,
description=u'New features exposed to web developers',
language=u'en'
)
for f in data:
pubdate = datetime.datetime.strptime(str(f['updated'][:19]),
'%Y-%m-%d %H:%M:%S')
feed.add_item(
title=unicode(f['name']),
link='%s/%s' % (feature_url_prefix, f.get('id')),
description=f.get('summary', ''),
pubdate=pubdate,
author_name=unicode(settings.APP_TITLE),
categories=[f['category']]
)
self.response.headers.add_header('Content-Type',
'application/atom+xml;charset=utf-8')
self.response.out.write(feed.writeString('utf-8'))
def handle_401(request, response, exception):
ERROR_401 = (
'<style>'
'body { padding: 2em; }'
'h1, h2 { font-weight: 300; font-family: "Roboto", sans-serif; }\n'
'</style>\n'
'<title>401 Unauthorized</title>\n'
'<h1>Error: Unauthorized</h1>\n'
'<h2>User does not have permission to view this page.</h2>')
response.write(ERROR_401)
response.set_status(401)
def handle_404(request, response, exception):
ERROR_404 = (
'<style>'
'body { padding: 2em; }'
'h1, h2 { font-weight: 300; font-family: "Roboto", sans-serif; }\n'
'</style>\n'
'<title>404 Not Found</title>\n'
'<h1>Error: Not Found</h1>\n'
'<h2>The requested URL was not found on this server.'
'</h2>')
response.write(ERROR_404)
response.set_status(404)
def handle_500(request, response, exception):
logging.exception(exception)
ERROR_500 = (
'<style>'
'body { padding: 2em; }'
'h1, h2 { font-weight: 300; font-family: "Roboto", sans-serif; }\n'
'</style>\n'
'<title>500 Internal Server Error</title>\n'
'<h1>Error: 500 Internal Server Error</h1>')
response.write(ERROR_500)
response.set_status(500)
|
apache-2.0
| 5,276,832,687,915,904,000
| 30.813397
| 80
| 0.630772
| false
| 3.563237
| false
| false
| false
|
sdrdis/iarpa_contest_submission
|
lib_exec/StereoPipeline/libexec/asp_cmd_utils.py
|
1
|
6816
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
"""IrgSystemFunctions.py - General systems related utilities"""
import sys, os, re, shutil, subprocess, string, time, errno, multiprocessing
class CmdRunException(Exception):
    """Raised when an executed command fails to create its expected output."""
    pass
def isCmdOption(arg):
"""Returns True if the string is a command line option,
False otherwise (if it is an argument)"""
# An option must start with '-' and not consist of all numbers
if ( arg.startswith('-') and not re.match('^-[0-9.]+$', arg) ):
return True
else:
return False
# The following functions are useful for going between string and list
# representations of command line arguments
def isNotString(a):
"""Returns true if the object is not a string"""
return (not isinstance(a, basestring))
def argListToString(argList):
"""Converts a list of arguments into a single argument string"""
string = ""
for arg in argList:
stringVersion = str(arg)
# Wrap arguments with spaces in them in "" so they stay together
if stringVersion.find(' ') >= 0:
string = string + '"' + stringVersion + '" '
else:
string = string + stringVersion + ' '
return string
def stringToArgList(string):
"""Converts a single argument string into a list of arguments"""
return string.split(" ")
# TODO: Improve this function a bit
def executeCommand(cmd,
outputPath=None, # If given, throw if the file is not created. Don't run if it already exists.
suppressOutput=False, # If true, don't print anything!
force=False): # If true , run even if outputPath already exists.
'''Executes a command with multiple options'''
if cmd == '': # An empty task
return
# Convert the input to list format if needed
if not isNotString(cmd):
cmd = stringToArgList(cmd)
# Run the command if conditions are met
if force or (not outputPath) or (not os.path.exists(outputPath)):
if suppressOutput: # Process silently
FNULL = open(os.devnull, 'w')
subprocess.call(cmd, stdout=FNULL, stderr=subprocess.STDOUT)
else: # Display output
print cmd
subprocess.call(cmd)
# Optionally check that the output file was created
if outputPath and (not os.path.exists(outputPath)):
raise CmdRunException('Failed to create output file: ' + outputPath)
return True
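# A minimal usage sketch (hypothetical commands and paths, not from the source):
# executeCommand('ls -l', suppressOutput=True)  # always runs, quietly
# executeCommand(['touch', '/tmp/out.txt'], outputPath='/tmp/out.txt')
# # ^ skipped when /tmp/out.txt already exists; raises CmdRunException if the
# #   file is still missing after the command ran.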
#==================================================
# This class implements a variant of OptionParser which ignores unknown options.
from optparse import (OptionParser,BadOptionError,AmbiguousOptionError)
class PassThroughOptionParser(OptionParser):
# Overwrite the default implementation which deletes newlines
def format_epilog(self, formatter):
return self.epilog
def _process_args(self, largs, rargs, values):
while rargs:
try:
self._process_args2(largs,rargs,values)
except (BadOptionError,AmbiguousOptionError) as e: # On failure, pass option to output list
if sys.version_info < (2, 6, 0):
# Port to Python 2.4
p = re.match("^.*?no such option:\s*(.*?)$", e.msg)
if p:
largs.append(p.group(1))
else:
largs.append(e.opt_str)
# This version of the function successfully passes through negative numbers
def _process_args2(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
p = re.match('^-[0-9.]+$', arg) # Identify a numeric argument
if p:
del rargs[0]
raise BadOptionError(arg)
#self.error(_("%s unrecognized number in arguments") % arg)
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
OptionParser._process_long_opt(self, rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
OptionParser._process_short_opts(self, rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
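# A minimal usage sketch (hypothetical options, not from the source):
# parser = PassThroughOptionParser()
# parser.add_option('--known', action='store_true')
# (opts, leftovers) = parser.parse_args(['--known', '--unknown', '-42'])
# # 'leftovers' now holds ['--unknown', '-42'] instead of raising
# # BadOptionError; note the negative number is passed through as well.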
|
mit
| -4,660,527,434,852,196,000
| 37.948571
| 119
| 0.59375
| false
| 4.204812
| false
| false
| false
|
f-frhs/queequeg
|
document.py
|
1
|
5023
|
#!/usr/bin/env python
## $Id: document.py,v 1.2 2003/07/27 13:54:05 euske Exp $
##
## document.py - Document analyzer (HTML/TeX/PlainText)
##
import re, sys
from texparser import TexParser, TexTokenizer
from sgmllib_rev import SGMLParser
from abstfilter import AbstractFeeder, AbstractFilter, AbstractConsumer
class HTMLProcessor(SGMLParser, AbstractFeeder):
def __init__(self, next_filter):
AbstractFeeder.__init__(self, next_filter)
SGMLParser.__init__(self)
self.t = 0
self.ok = 1
return
def handle_data(self, s):
if not self.ok:
return
if s:
self.feed_next(s)
self.t = 1
return
def newline(self):
if self.t:
self.t = 0
self.feed_next(None)
return
def do_p(self, attrs):
self.newline()
return
def do_br(self, attrs):
self.newline()
return
def do_th(self, attrs):
self.newline()
return
def do_td(self, attrs):
self.newline()
return
def do_li(self, attrs):
self.newline()
return
def do_hr(self, attrs):
self.newline()
return
def do_h1(self, attrs):
self.newline()
return
def do_h2(self, attrs):
self.newline()
return
def do_h3(self, attrs):
self.newline()
return
def do_h4(self, attrs):
self.newline()
return
def do_h5(self, attrs):
self.newline()
return
def do_h6(self, attrs):
self.newline()
return
def start_style(self, attrs):
self.ok = 0
return
def end_style(self):
self.ok = 1
return
def start_script(self, attrs):
self.ok = 0
return
def end_script(self):
self.ok = 1
return
def close(self):
SGMLParser.close(self)
AbstractFeeder.close(self)
return
def read(self, f):
while 1:
s = f.readline()
if not s: break
self.feed(s)
self.close()
return
class TexProcessor(TexParser, AbstractFeeder):
def __init__(self, next_filter):
AbstractFeeder.__init__(self, next_filter)
TexParser.__init__(self)
self.next_paragraph = 0
self.t = 0
return
def process_paragraph(self):
if self.t:
self.feed_next(None)
self.next_paragraph = 0
return
def handle_data(self, data):
data1 = data.strip()
if not data1:
self.next_paragraph = 1
if self.next_paragraph:
self.process_paragraph()
if data1:
self.t = 1
self.feed_next(data)
return
def do_documentclass(self, arg):
return
def do_usepackage(self, arg):
return
def do_bibliography(self, arg):
return
def do_includegraphics(self, arg):
return
def do_cite(self, arg):
return
def do_ref(self, arg):
return
def do_label(self, arg):
return
def do_unknown_command(self, cmd):
return
def begin_tabular(self,arg):
return
def end_tabular(self):
return
# do not consider inline math expressions as individual sentences.
def begin_math(self):
return
def end_math(self):
return
def start_title(self):
self.next_paragraph = 1
return
def start_chapter(self):
self.next_paragraph = 1
return
def startchapter_a(self):
self.next_paragraph = 1
return
def startsection(self):
self.next_paragraph = 1
return
def startsection_a(self):
self.next_paragraph = 1
return
def startsubsection(self):
self.next_paragraph = 1
return
def startsubsection_a(self):
self.next_paragraph = 1
return
def startsubsubsection(self):
self.next_paragraph = 1
return
def startsubsubsection_a(self):
self.next_paragraph = 1
return
def do_tablesep(self):
self.next_paragraph = 1
return
def do_linebreak(self):
self.next_paragraph = 1
return
def do_item(self):
self.next_paragraph = 1
return
def begin_unknown_environment(self, env):
self.next_paragraph = 1
return
def close(self):
AbstractFeeder.close(self)
TexParser.close(self)
if self.next_paragraph:
self.process_paragraph()
return
def read(self, f):
tokenizer = TexTokenizer(f)
while 1:
t = tokenizer.get()
# print repr(t)
if not t: break
self.feed(t)
self.close()
return
class PlainTextProcessor(AbstractFeeder):
def __init__(self, next_filter):
AbstractFeeder.__init__(self, next_filter)
self.t = 0
return
def read(self, f):
while 1:
s = f.readline()
if not s: break
if not s.strip() and self.t:
self.feed_next(None)
else:
self.t = 1
self.feed_next(s)
self.close()
return
# main
if __name__ == "__main__":
class Consumer(AbstractConsumer):
def feed(self, s):
if s == None:
print "-"
else:
print repr(s)
return
if sys.argv[1] == "-t":
proc = TexProcessor
elif sys.argv[1] == "-l":
proc = HTMLProcessor
elif sys.argv[1] == "-p":
proc = PlainTextProcessor
else:
assert 0
proc(Consumer()).read(sys.stdin)
|
gpl-2.0
| 445,269,795,249,341,760
| 18.93254
| 71
| 0.614175
| false
| 3.380215
| false
| false
| false
|
akelm/YAMS
|
yams/wzm_layer.py
|
1
|
3735
|
import numpy as np
from scipy import special
from sum_conv import sum_conv
def wzm_layer(ME1,MM1, MEdd, MMdd,Lambda,odl, Ceps,pin, taun, bn1mat,settings):
    ## field enhancement inside the layer
# (Cst{1}.ME,Cst{1}.MM, Cst{dd}.ME, Cst{dd}.MM,...
# lambda, dip_pos ,Cepsilon{dd},theta,stPinTaun )
nNbtheta=pin.shape[1]
theta=np.linspace(0,np.pi,nNbtheta)[None,:,None]
(Ecr_j,Ect_j,Esf_j)=PweEgenThetaAllPhi(Lambda,Ceps,\
bn1mat*(MMdd[:,:,0,0] - MMdd[:,:,0,1]*MM1[:,:,1,0]/MM1[:,:,1,1])[:,:,None],\
bn1mat*(MEdd[:,:,0,0] - MEdd[:,:,0,1]*ME1[:,:,1,0]/ME1[:,:,1,1])[:,:,None],\
odl,theta,'j',pin,taun,settings) # [L x 1 x T]
(Ecr_h,Ect_h,Esf_h)=PweEgenThetaAllPhi(Lambda,Ceps,\
bn1mat*(MMdd[:,:,1,0]- MMdd[:,:,1,1]*MM1[:,:,1,0]/MM1[:,:,1,1])[:,:,None],\
bn1mat*(MEdd[:,:,1,0] - MEdd[:,:,1,1]*ME1[:,:,1,0]/ME1[:,:,1,1])[:,:,None],\
odl,theta,'h1',pin,taun,settings) # [L x 1 x T]
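    # A sketch of the averages computed below (my reading of the code, not
    # stated in the source): F_perp = (3/2) * sum_t |E_r(t)|^2 sin(t) / sum_t sin(t),
    # i.e. a discrete quadrature of the radial intensity over the polar angle;
    # F_para averages the two tangential components with a 3/4 prefactor.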
Fexcperp= 3/2*np.matmul(np.absolute(Ecr_j+Ecr_h)**2, np.sin(theta)) \
/np.sum(np.sin(theta)) # L
# print(np.max(np.abs(MEdd[:,:,1,0]- MEdd[:,:,1,1]*ME1[:,:,1,0]/ME1[:,:,1,1]))) # L
Fexcpara = 3/4*(np.matmul(np.absolute(Ect_j+Ect_h)**2 + np.absolute(Esf_j+Esf_h)**2, \
np.sin(theta)) ) /np.sum(np.sin(theta))
return (Fexcperp[:,0,0],Fexcpara[:,0,0])
def PweEgenThetaAllPhi(Lambda,epsilon,cn1,dn1,r0,theta,sBessel,pin,taun,settings):
nNmax=cn1.shape[1]
nm1=np.arange(0,nNmax+1)[None,:,None] # 1 x nNmax+1
n=nm1[:,1:,:] # 1 x nNmax
cffnr=np.sqrt((2*n+1)/(4*np.pi)) # 1 x nNmax
mun=cffnr/(n*(n+1)) # 1 x nNmax
if r0==0:
Esf= (dn1[:,0,:]/np.sqrt(3*np.pi))[:,None]
Ecr=-Esf * np.sin(theta)
Ect=-Esf * np.cos(theta)
else:
# get Zn(rho) for radial dependence and derived functions
if np.isinf(r0):
# for far-field radiation profile
dn1Z1=0 # [L x 1]
icn1Z0=cn1 # [L x nNmax]
dn1Z2=dn1 # [L x nNmax]
mun=mun*((-1j)**(n+1)) # 1 x nNmax
else:
rho=(2*np.pi* np.sqrt(epsilon)/Lambda*r0)[:,:,None] # column [L x 1]
f=special.spherical_jn(nm1,rho) # [L x nNmax+1]
if sBessel=='h1':
f=f+1j*special.spherical_yn(nm1,rho) # [L x nNmax+1]
stZnAll_Z0=f[:,1:,:] # [L x nNmax]
stZnAll_Z1=stZnAll_Z0/rho # [L x nNmax]
stZnAll_Z2=f[:,:-1,:] - nm1[:,1:,:]*stZnAll_Z1 # [L x nNmax]
dn1Z1=dn1*stZnAll_Z1 # [L x nNmax]
icn1Z0=1j*cn1*stZnAll_Z0 # [L x nNmax]
dn1Z2=dn1*stZnAll_Z2 # [L x nNmax]
# pin 1 x T x N
# vecNdep=dn1Z1*cffnr # [L x nNmax x 1]
# Ersum=np.matmul(pin,vecNdep)
vecNdep=(dn1Z1*cffnr).swapaxes(1,2) # [L x 1 x nNmax]
Ersum=sum_conv(pin*vecNdep,2,settings)
# vecNdep=icn1Z0*mun # [L x nNmax]
# vecNdep2=dn1Z2*mun # [L x nNmax]
vecNdep=(icn1Z0*mun).swapaxes(1,2) # [L x 1 x nNmax]
vecNdep2=(dn1Z2*mun).swapaxes(1,2) # [L x 1 x nNmax]
# tmp1=np.matmul(pin, vecNdep)
# tmp2=np.matmul(taun, vecNdep2)
tmp1=sum_conv(pin*vecNdep,2,settings)
tmp2=sum_conv(taun*vecNdep2,2,settings)
Etsum=tmp1+tmp2
# tmp1=np.matmul(taun, vecNdep)
# tmp2=np.matmul(pin, vecNdep2)
tmp1=sum_conv(pin*vecNdep2,2,settings)
tmp2=sum_conv(taun*vecNdep,2,settings)
Efsum=tmp1+tmp2
Ecr=-2*np.sin(theta)*Ersum
Ect=-2*Etsum # corresponds to S_2 if r0==Inf
Esf=2*Efsum # corresponds to (-S_1) if r0==Inf
return (np.swapaxes(Ecr,1,2),np.swapaxes(Ect,1,2),np.swapaxes(Esf,1,2)) # Lx1xT
|
gpl-3.0
| -6,966,585,909,532,253,000
| 37.90625
| 90
| 0.538956
| false
| 2.276051
| false
| false
| false
|
epam/DLab
|
infrastructure-provisioning/src/general/scripts/azure/ssn_create_vpc.py
|
1
|
1571
|
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import argparse
import sys
from dlab.actions_lib import *
from dlab.meta_lib import *
parser = argparse.ArgumentParser()
parser.add_argument('--vpc_name', type=str, default='')
parser.add_argument('--resource_group_name', type=str, default='')
parser.add_argument('--region', type=str, default='')
parser.add_argument('--vpc_cidr', type=str, default='')
args = parser.parse_args()
if __name__ == "__main__":
if args.vpc_name != '':
if AzureMeta().get_vpc(args.resource_group_name, args.vpc_name):
print("REQUESTED VIRTUAL NETWORK {} EXISTS".format(args.vpc_name))
else:
print("Creating Virtual Network {}".format(args.vpc_name))
AzureActions().create_vpc(args.resource_group_name, args.vpc_name, args.region, args.vpc_cidr)
else:
sys.exit(1)
|
apache-2.0
| 2,222,215,809,739,852,500
| 37.317073
| 106
| 0.623806
| false
| 3.957179
| false
| false
| false
|
arunchandramouli/fanofpython
|
code/features/datatypes/dicts.py
|
1
|
3592
|
'''
Aim :: To demonstrate the use of a dictionary
Define a simple dictionary, add values to it, then iterate over it and print it
A dictionary works on the hashing principle ... simply put, key,value pairs
** A dictionary object is a mutable datatype, which means it can't be hashed
Anything that can't be hashed can't be used as a dictionary key
'''
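'''
For instance (a sketch, kept commented out because it raises an error):
a list is mutable, hence unhashable, and so cannot be used as a key
'''
#dictC = {}
#dictC[[1,2]] = 'value' # TypeError: unhashable type: 'list'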
'''
An Empty Dict
'''
dictA = dict() # same as dictA = {}
dictB = dict() # same as dictB = {}
'''
Adding values to it
'''
for i in [1,3,2,4,8,9,5,6,7]:
dictB[i] = i
'''
Adding values to a dict
'''
# Let us use a simple for loop to add values to a dict
for i in xrange(10):
dictA[i] = i + 10
print dictA,'\n\n',dictB,'\n\n'
'''
Adding the same keys to the dict
When a key is assigned again at run-time, its earlier value is
overwritten and only the latest value is retained for that key.
'''
for i in [11,11,14,12,13]:
dictB[i] = i * 10
print dictB,'\n\n'
'''
Exploring a dict
'''
#print dir(dictB)
'''
['__class__', '__cmp__', '__contains__', '__delattr__', '__delitem__', '__doc__', '__eq__', '__format__', '__ge__',
'__getattribute__', '__getitem__', '__gt__', '__hash__', '__init__', '__iter__', '__le__', '__len__', '__lt__',
'__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__setitem__', '__sizeof__', '__str__',
'__subclasshook__', 'clear', 'copy', 'fromkeys', 'get', 'has_key', 'items', 'iteritems', 'iterkeys', 'itervalues', 'keys',
'pop', 'popitem', 'setdefault', 'update', 'values', 'viewitems', 'viewkeys', 'viewvalues']
'''
'''
Lets explore them ...
'''
print "__class__",' = ',dictB.__class__,'\n\n'
print "Values ",' = ',dictB.values(),'\n\n'
print "Keys ",' = ',dictB.keys(),'\n\n'
print "Items ",' = ',dictB.items(),'\n\n'
print "iteritems ",' = ',dictB.iteritems(),'\n\n'
print "iterkeys ",' = ',dictB.iterkeys(),'\n\n'
print "itervalues ",' = ',dictB.itervalues(),'\n\n'
print "viewitems ",' = ',dictB.viewitems(),'\n\n'
print "viewkeys ",' = ',dictB.viewkeys(),'\n\n'
print "viewvalues ",' = ',dictB.viewvalues(),'\n\n'
'''
Check if the dict has certain key
'''
print "Check for Key = 0 in dictA and dictB ",'\n\n',dictA.has_key(0),'\n',dictB.has_key(1000),'\n'
'''
Accessing the value of a dictionary
'''
'''
Now I want to access dictA and get some values
'''
print "Acessing dictA using [] ",'\n'
print dictA[5] # same as dictA.__getitem__(5)
# This will generate KeyError
#print dictA['Arun'] # same as dictA.__getitem__('Arun')
'''
In the case above, when we access dictA[5] and the key is not there,
we will get a KeyError. To avoid that, use the .get() method;
it returns a default value (None unless specified) in that case.
'''
print "Accessing dictA using .get() ",'\n'
print dictA.get(1000,"Key isn't there yet"),'\n\n'
'''
Iterate and print the keys and values
'''
print "Iterate and print the keys and values .... ",'\n\n'
for key , value in dictB.items():
print "Key = %s and Value = %s "%(key,value),'\n\n'
'''
Clear the values in the dictionary
** Before we clear, let's note the memory address **
'''
'''
When we use .clear() to empty the dictionary
the address does not change
'''
print "Memory Address dictB - Before Clear %s "%id(dictB),'\n\n'
dictB.clear()
print "dictB = %s "%dictB,'\n\n'
print "Memory Address dictB - After Clear %s "%id(dictB),'\n\n'
'''
When we use {} to empty the dictionary
the address does change, since a new object is created
'''
print "Memory Address dictA - Before Clear %s "%id(dictA),'\n\n'
dictA = {}
print "dictA = %s "%dictA,'\n\n'
print "Memory Address dictA - After Clear %s "%id(dictA),'\n\n'
|
gpl-3.0
| -2,898,241,896,216,891,000
| 19.179775
| 125
| 0.599109
| false
| 2.91322
| false
| false
| false
|
samshara/Stock-Market-Analysis-and-Prediction
|
smap_nepse/preprocessing/visualization.py
|
1
|
6795
|
import pandas as pd
from pandas.plotting import scatter_matrix  # on older pandas: pandas.tools.plotting
import numpy as np
import csv
import os
import matplotlib.pyplot as plt
import cufflinks as cf
import plotly
import plotly.offline as py
from plotly.offline.offline import _plot_html
import plotly.graph_objs as go
from plotly.tools import FigureFactory as FF
cf.set_config_file(world_readable=False,offline=True)
plt.style.use('ggplot')
def plot(name, *, cols=[], plot_kind=None, start_date=None, end_date=None):
""" Plots selected financial data of selected company which ranges over specified
date range[start_date:end_date]. The plot is as specified by the plot_kind parameter.
:param
name: company's ticker
cols: list of columns specifying data fields to plot.
        plot_kind: type of plot. One of 'line', 'box', 'hexbin','scatter_matrix'.
start_date: The data is indexed by the Date column. starting date specifies
the first date index row to be plotted.
end_date: end_date specifies the last date index row to be plotted.
"""
header = ['Date','Total Transactions','Traded Shares','TotalTraded Amount',
'Maximum Price','Minimum Price','Closing Price']
plottypes = ['line', 'box', 'hexbin','scatter_matrix']
if cols is None or not cols:
cols = header[1:]
if plot_kind is None:
plot_kind = 'line'
if not set(cols) <= set(header):
raise ValueError('{} is not a valid column list in the data present.'.format(cols))
if not plot_kind in plottypes:
raise ValueError('{} is not a valid plot type. Please enter one of these {}.'.format(plot_kind, plottypes))
filename = name
try:
data = pd.read_csv(filename,index_col=0, parse_dates=True)
except(FileNotFoundError, IOError):
print('Wrong file or file path.')
return None
if plot_kind == 'scatter_matrix':
scatter_matrix(data.ix[:,cols][start_date:end_date], alpha=0.2, diagonal='kde')
elif plot_kind == 'hexbin':
if len(cols) < 2:
print('invalid no of columns for a hexbin plot. Two data columns are required.')
return None
data.ix[:,cols][start_date:end_date].plot(kind=plot_kind, x=cols[0], y=cols[1], gridsize=25)
else:
data.ix[:,cols][start_date:end_date].plot(kind=plot_kind,subplots=True,
title='{} Plot of {}.'.format(plot_kind.title(),name))
plt.show()
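# A hypothetical usage sketch (the CSV filename is an assumption, not from the source):
# plot('nabil.csv', cols=['Closing Price'], plot_kind='line',
#      start_date='2015-01-01', end_date='2015-12-31')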
def comparision_plot(name,*, cols=None, plot_kind=None, start_date=None, end_date=None):
""" Plots selected financial data of selected companies which ranges over specified
date range[start_date:end_date]. The plot is as specified by the plot_kind parameter.
:param
name: list of companies ticker.
cols: list of columns specifying data fields to plot.
        plot_kind: type of plot. One of 'line', 'box'.
start_date: The data is indexed by the Date column. starting date specifies
the first date index row to be plotted.
end_date: end_date specifies the last date index row to be plotted.
"""
header = ['Date','Total Transactions','Traded Shares','TotalTraded Amount',
'Maximum Price','Minimum Price','Closing Price']
plottypes = ['line', 'box']
if cols is None or not cols:
cols = header[1:]
if plot_kind is None:
plot_kind = 'line'
if not set(cols) <= set(header):
raise ValueError('{} is not a valid column list in the data present.'.format(cols))
if not plot_kind in plottypes:
raise ValueError('{} is not a valid plot type. Please enter one of these {}.'.format(plot_kind, plottypes))
filenames = name
try:
data = pd.concat([pd.read_csv(company, index_col=0, parse_dates=True) for company in filenames], axis=1, keys=name)
except(FileNotFoundError, IOError):
print('Wrong file or file path.')
return None
    ax = data.ix[:, data.columns.get_level_values(1).isin(set(cols))][start_date:end_date].plot(kind=plot_kind)
ax.set_title('{} Plot of {} of {}.'.format(plot_kind.title(),','.join(cols), ','.join([s.strip('.csv') for s in name])))
plt.legend(title='Companies', fancybox=True, shadow=True, loc='best')
plt.show()
def financialplots(filename, plotkind):
try:
data = pd.read_csv(filename,index_col=0, parse_dates=True)
except(FileNotFoundError, IOError):
print('Wrong file or file path.')
return None
if plotkind == 'candlestick':
fig = FF.create_candlestick(data['Opening Price'], data['Maximum Price'], data['Minimum Price'], data['Closing Price'],dates=data.index)
elif plotkind == 'macd':
fig = data['Closing Price'].ta_plot(study='macd', fast_period=12, slow_period=26, signal_period=9, asFigure=True)
elif plotkind == 'boll':
fig = data['Closing Price'].ta_plot(study='boll',asFigure=True)
elif plotkind == 'ohlc':
fig = FF.create_ohlc(data['Opening Price'], data['Maximum Price'], data['Minimum Price'], data['Closing Price'],dates=data.index)
elif plotkind == 'sma':
fig = data['Closing Price'].ta_plot(study='sma', asFigure=True)
py.plot(fig,filename='../../plots/'+filename[:-4]+plotkind,validate=False,auto_open=False)
#py.plot(fig,image='png',image_width=1200, image_height=800)
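# Hypothetical usage sketch: financialplots('nabil.csv', 'candlestick') would write
# an offline plotly figure to ../../plots/nabilcandlestick(.html).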
def statisticplots(filename, plotkind,columns):
try:
data = pd.read_csv(filename,index_col=0, parse_dates=True)
except(FileNotFoundError, IOError):
print('Wrong file or file path.')
return None
if columns is None or not columns:
columns = list(data.columns.values)
data = data.ix[:,columns]
if plotkind == 'scattermatrix':
fig = FF.create_scatterplotmatrix(data,diag='box',title='Scattermatrix plot of {}'.format(filename[:-4]))
elif plotkind == 'line':
fig = data.iplot(theme='pearl',kind='scatter',title='Line plot of {}.'.format(filename[:-4]),subplots=True,asFigure=True)
elif plotkind == 'box':
fig = data.iplot(theme='pearl',kind='box',title='Box plot of {}.'.format(filename[:-4]),asFigure=True)
py.plot(fig,filename='../../plots/'+filename[:-4]+plotkind,validate=False,auto_open=False)
def compare(names,columns):
try:
data = pd.concat([pd.read_csv(company, index_col=0, parse_dates=True) for company in names], axis=1, keys=names)
except(FileNotFoundError, IOError):
print('Wrong file or file path.')
return None
data = data.ix[:,data.columns.get_level_values(1).isin(set(columns))]
fig = data.iplot(theme='pearl',kind='scatter',title='Line Plot of {} of {}.'.format(','.join(columns), ','.join([s.strip('.csv') for s in names])),subplots=True,asFigure=True)
    py.plot(fig,filename='../../plots/compareplot',validate=False,auto_open=False)
|
mit
| 210,266,800,827,579,520
| 45.541096
| 179
| 0.647976
| false
| 3.655191
| false
| false
| false
|
michael-lazar/mailcap_fix
|
tests/test_mailcap_python.py
|
1
|
10329
|
###############################################################################
# This file was copied from cpython/Lib/tests/test_mailcap.py
# Some lines have been modified to work without the python test runner
###############################################################################
import os
import copy
import unittest
from mailcap_fix import mailcap
# Location of mailcap file
MAILCAPFILE = os.path.join(os.path.dirname(__file__), 'data/mailcap.txt')
# Dict to act as mock mailcap entry for this test
# The keys and values should match the contents of MAILCAPFILE
MAILCAPDICT = {
'application/x-movie':
[{'compose': 'moviemaker %s',
'x11-bitmap': '"/usr/lib/Zmail/bitmaps/movie.xbm"',
'description': '"Movie"',
'view': 'movieplayer %s',
'lineno': 4}],
'application/*':
[{'copiousoutput': '',
'view': 'echo "This is \\"%t\\" but is 50 \\% Greek to me" \\; cat %s',
'lineno': 5}],
'audio/basic':
[{'edit': 'audiocompose %s',
'compose': 'audiocompose %s',
'description': '"An audio fragment"',
'view': 'showaudio %s',
'lineno': 6}],
'video/mpeg':
[{'view': 'mpeg_play %s', 'lineno': 13}],
'application/postscript':
[{'needsterminal': '', 'view': 'ps-to-terminal %s', 'lineno': 1},
{'compose': 'idraw %s', 'view': 'ps-to-terminal %s', 'lineno': 2}],
'application/x-dvi':
[{'view': 'xdvi %s', 'lineno': 3}],
'message/external-body':
[{'composetyped': 'extcompose %s',
'description': '"A reference to data stored in an external location"',
'needsterminal': '',
'view': 'showexternal %s %{access-type} %{name} %{site} %{directory} %{mode} %{server}',
'lineno': 10}],
'text/richtext':
[{'test': 'test "`echo %{charset} | tr \'[A-Z]\' \'[a-z]\'`" = iso-8859-8',
'copiousoutput': '',
'view': 'shownonascii iso-8859-8 -e richtext -p %s',
'lineno': 11}],
'image/x-xwindowdump':
[{'view': 'display %s', 'lineno': 9}],
'audio/*':
[{'view': '/usr/local/bin/showaudio %t', 'lineno': 7}],
'video/*':
[{'view': 'animate %s', 'lineno': 12}],
'application/frame':
[{'print': '"cat %s | lp"', 'view': 'showframe %s', 'lineno': 0}],
'image/rgb':
[{'view': 'display %s', 'lineno': 8}]
}
# For backwards compatibility, readmailcapfile() and lookup() still support
# the old version of mailcapdict without line numbers.
MAILCAPDICT_DEPRECATED = copy.deepcopy(MAILCAPDICT)
for entry_list in MAILCAPDICT_DEPRECATED.values():
for entry in entry_list:
entry.pop('lineno')
class HelperFunctionTest(unittest.TestCase):
def test_listmailcapfiles(self):
# The return value for listmailcapfiles() will vary by system.
# So verify that listmailcapfiles() returns a list of strings that is of
# non-zero length.
mcfiles = mailcap.listmailcapfiles()
self.assertIsInstance(mcfiles, list)
for m in mcfiles:
self.assertIsInstance(m, str)
env = os.environ
# According to RFC 1524, if MAILCAPS env variable exists, use that
# and only that.
if "MAILCAPS" in env:
env_mailcaps = env["MAILCAPS"].split(os.pathsep)
else:
env_mailcaps = ["/testdir1/.mailcap", "/testdir2/mailcap"]
env["MAILCAPS"] = os.pathsep.join(env_mailcaps)
mcfiles = mailcap.listmailcapfiles()
self.assertEqual(env_mailcaps, mcfiles)
def test_readmailcapfile(self):
# Test readmailcapfile() using test file. It should match MAILCAPDICT.
with open(MAILCAPFILE, 'r') as mcf:
d = mailcap.readmailcapfile(mcf)
self.assertDictEqual(d, MAILCAPDICT_DEPRECATED)
def test_lookup(self):
# Test without key
expected = [{'view': 'animate %s', 'lineno': 12},
{'view': 'mpeg_play %s', 'lineno': 13}]
actual = mailcap.lookup(MAILCAPDICT, 'video/mpeg')
self.assertListEqual(expected, actual)
# Test with key
key = 'compose'
expected = [{'edit': 'audiocompose %s',
'compose': 'audiocompose %s',
'description': '"An audio fragment"',
'view': 'showaudio %s',
'lineno': 6}]
actual = mailcap.lookup(MAILCAPDICT, 'audio/basic', key)
self.assertListEqual(expected, actual)
# Test on user-defined dicts without line numbers
c = copy.deepcopy(MAILCAPDICT)
for entry_list in c.values():
for entry in entry_list:
entry.pop('lineno')
expected = [{'view': 'mpeg_play %s'}, {'view': 'animate %s'}]
actual = mailcap.lookup(c, 'video/mpeg')
self.assertListEqual(expected, actual)
def test_subst(self):
plist = ['id=1', 'number=2', 'total=3']
# test case: ([field, MIMEtype, filename, plist=[]], <expected string>)
test_cases = [
(["", "audio/*", "foo.txt"], ""),
(["echo foo", "audio/*", "foo.txt"], "echo foo"),
(["echo %s", "audio/*", "foo.txt"], "echo foo.txt"),
(["echo %t", "audio/*", "foo.txt"], "echo audio/*"),
(["echo \%t", "audio/*", "foo.txt"], "echo %t"),
(["echo foo", "audio/*", "foo.txt", plist], "echo foo"),
(["echo %{total}", "audio/*", "foo.txt", plist], "echo 3")
]
for tc in test_cases:
self.assertEqual(mailcap.subst(*tc[0]), tc[1])
class GetcapsTest(unittest.TestCase):
def test_mock_getcaps(self):
# Test mailcap.getcaps() using mock mailcap file in this dir.
# Temporarily override any existing system mailcap file by pointing the
# MAILCAPS environment variable to our mock file.
os.environ["MAILCAPS"] = MAILCAPFILE
caps = mailcap.getcaps()
self.assertDictEqual(caps, MAILCAPDICT)
def test_system_mailcap(self):
# Test mailcap.getcaps() with mailcap file(s) on system, if any.
caps = mailcap.getcaps()
self.assertIsInstance(caps, dict)
mailcapfiles = mailcap.listmailcapfiles()
existingmcfiles = [mcf for mcf in mailcapfiles if os.path.exists(mcf)]
if existingmcfiles:
# At least 1 mailcap file exists, so test that.
for (k, v) in caps.items():
self.assertIsInstance(k, str)
self.assertIsInstance(v, list)
for e in v:
self.assertIsInstance(e, dict)
else:
# No mailcap files on system. getcaps() should return empty dict.
self.assertEqual({}, caps)
class FindmatchTest(unittest.TestCase):
def test_findmatch(self):
# default findmatch arguments
c = MAILCAPDICT
fname = "foo.txt"
plist = ["access-type=default", "name=john", "site=python.org",
"directory=/tmp", "mode=foo", "server=bar"]
audio_basic_entry = {
'edit': 'audiocompose %s',
'compose': 'audiocompose %s',
'description': '"An audio fragment"',
'view': 'showaudio %s',
'lineno': 6
}
audio_entry = {"view": "/usr/local/bin/showaudio %t", 'lineno': 7}
video_entry = {'view': 'animate %s', 'lineno': 12}
message_entry = {
'composetyped': 'extcompose %s',
'description': '"A reference to data stored in an external location"', 'needsterminal': '',
'view': 'showexternal %s %{access-type} %{name} %{site} %{directory} %{mode} %{server}',
'lineno': 10,
}
# test case: (findmatch args, findmatch keyword args, expected output)
# positional args: caps, MIMEtype
# keyword args: key="view", filename="/dev/null", plist=[]
# output: (command line, mailcap entry)
cases = [
([{}, "video/mpeg"], {}, (None, None)),
([c, "foo/bar"], {}, (None, None)),
([c, "video/mpeg"], {}, ('animate /dev/null', video_entry)),
([c, "audio/basic", "edit"], {}, ("audiocompose /dev/null", audio_basic_entry)),
([c, "audio/basic", "compose"], {}, ("audiocompose /dev/null", audio_basic_entry)),
([c, "audio/basic", "description"], {}, ('"An audio fragment"', audio_basic_entry)),
([c, "audio/basic", "foobar"], {}, (None, None)),
([c, "video/*"], {"filename": fname}, ("animate %s" % fname, video_entry)),
([c, "audio/basic", "compose"],
{"filename": fname},
("audiocompose %s" % fname, audio_basic_entry)),
([c, "audio/basic"],
{"key": "description", "filename": fname},
('"An audio fragment"', audio_basic_entry)),
([c, "audio/*"],
{"filename": fname},
("/usr/local/bin/showaudio audio/*", audio_entry)),
([c, "message/external-body"],
{"plist": plist},
("showexternal /dev/null default john python.org /tmp foo bar", message_entry))
]
self._run_cases(cases)
@unittest.skipUnless(os.name == "posix", "Requires 'test' command on system")
def test_test(self):
# findmatch() will automatically check any "test" conditions and skip
# the entry if the check fails.
caps = {"test/pass": [{"test": "test 1 -eq 1"}],
"test/fail": [{"test": "test 1 -eq 0"}]}
# test case: (findmatch args, findmatch keyword args, expected output)
# positional args: caps, MIMEtype, key ("test")
# keyword args: N/A
# output: (command line, mailcap entry)
cases = [
# findmatch will return the mailcap entry for test/pass because it evaluates to true
([caps, "test/pass", "test"], {}, ("test 1 -eq 1", {"test": "test 1 -eq 1"})),
# findmatch will return None because test/fail evaluates to false
([caps, "test/fail", "test"], {}, (None, None))
]
self._run_cases(cases)
def _run_cases(self, cases):
for c in cases:
self.assertEqual(mailcap.findmatch(*c[0], **c[1]), c[2])
|
unlicense
| 7,113,809,215,644,407,000
| 41.336066
| 104
| 0.538871
| false
| 3.762842
| true
| false
| false
|
PKRoma/httpie
|
tests/test_compress.py
|
1
|
3882
|
"""
We test against httpbin which doesn't return the request data in a
consistent way:
1. Non-form requests: the `data` field contains base64 encoded version of
our zlib-encoded request data.
2. Form requests: `form` contains a messed up version of the data.
"""
import base64
import zlib
from .fixtures import FILE_PATH, FILE_CONTENT
from httpie.status import ExitStatus
from .utils import StdinBytesIO, http, HTTP_OK, MockEnvironment
def assert_decompressed_equal(base64_compressed_data, expected_str):
compressed_data = base64.b64decode(
base64_compressed_data.split(',', 1)[1])
data = zlib.decompress(compressed_data)
actual_str = data.decode()
# FIXME: contains a trailing linebreak with an uploaded file
actual_str = actual_str.rstrip()
assert actual_str == expected_str
def test_cannot_combine_compress_with_chunked(httpbin):
r = http('--compress', '--chunked', httpbin.url + '/get',
tolerate_error_exit_status=True)
assert r.exit_status == ExitStatus.ERROR
assert 'cannot combine --compress and --chunked' in r.stderr
def test_cannot_combine_compress_with_multipart(httpbin):
r = http('--compress', '--multipart', httpbin.url + '/get',
tolerate_error_exit_status=True)
assert r.exit_status == ExitStatus.ERROR
assert 'cannot combine --compress and --multipart' in r.stderr
def test_compress_skip_negative_ratio(httpbin_both):
r = http(
'--compress',
httpbin_both + '/post',
'foo=bar',
)
assert HTTP_OK in r
assert 'Content-Encoding' not in r.json['headers']
assert r.json['json'] == {'foo': 'bar'}
def test_compress_force_with_negative_ratio(httpbin_both):
r = http(
'--compress',
'--compress',
httpbin_both + '/post',
'foo=bar',
)
assert HTTP_OK in r
assert r.json['headers']['Content-Encoding'] == 'deflate'
assert_decompressed_equal(r.json['data'], '{"foo": "bar"}')
def test_compress_json(httpbin_both):
r = http(
'--compress',
'--compress',
httpbin_both + '/post',
'foo=bar',
)
assert HTTP_OK in r
assert r.json['headers']['Content-Encoding'] == 'deflate'
assert_decompressed_equal(r.json['data'], '{"foo": "bar"}')
assert r.json['json'] is None
def test_compress_form(httpbin_both):
r = http(
'--form',
'--compress',
'--compress',
httpbin_both + '/post',
'foo=bar',
)
assert HTTP_OK in r
assert r.json['headers']['Content-Encoding'] == 'deflate'
assert r.json['data'] == ""
assert '"foo": "bar"' not in r
def test_compress_raw(httpbin_both):
r = http(
'--raw',
FILE_CONTENT,
'--compress',
'--compress',
httpbin_both + '/post',
)
assert HTTP_OK in r
assert r.json['headers']['Content-Encoding'] == 'deflate'
assert_decompressed_equal(r.json['data'], FILE_CONTENT.strip())
def test_compress_stdin(httpbin_both):
env = MockEnvironment(
stdin=StdinBytesIO(FILE_PATH.read_bytes()),
stdin_isatty=False,
)
r = http(
'--compress',
'--compress',
'PATCH',
httpbin_both + '/patch',
env=env,
)
assert HTTP_OK in r
assert r.json['headers']['Content-Encoding'] == 'deflate'
assert_decompressed_equal(r.json['data'], FILE_CONTENT.strip())
assert not r.json['json']
def test_compress_file(httpbin_both):
r = http(
'--form',
'--compress',
'--compress',
'PUT',
httpbin_both + '/put',
f'file@{FILE_PATH}',
)
assert HTTP_OK in r
assert r.json['headers']['Content-Encoding'] == 'deflate'
assert r.json['headers']['Content-Type'].startswith(
'multipart/form-data; boundary=')
assert r.json['files'] == {}
assert FILE_CONTENT not in r
|
bsd-3-clause
| 5,335,729,547,268,975,000
| 26.728571
| 73
| 0.607161
| false
| 3.611163
| true
| false
| false
|
alirizakeles/tendenci
|
tendenci/apps/accountings/management/commands/correct_membership_acct_number.py
|
1
|
2067
|
from django.core.management.base import BaseCommand
from django.contrib.contenttypes.models import ContentType
class Command(BaseCommand):
"""
Correct the account_number of AcctTran records for the memberships
for those that are wrongly assigned with the event's account_number.
Usage: python manage.py correct_membership_acct_number
"""
def handle(self, *args, **options):
from tendenci.apps.memberships.models import MembershipDefault
from tendenci.apps.invoices.models import Invoice
from tendenci.apps.accountings.models import Acct, AcctEntry
account_number = MembershipDefault().get_acct_number()
acct = Acct.objects.get(account_number=account_number)
accts_ignore = Acct.objects.filter(
account_number__in=['220000',
'120000',
'106000']
)
num_trans_updated = 0
[content_type] = ContentType.objects.filter(
app_label='memberships',
model='membershipdefault'
)[:1] or [None]
if content_type:
membership_invoices = Invoice.objects.filter(
object_type=content_type
)
for invoice in membership_invoices:
acct_entries = AcctEntry.objects.filter(
source='invoice',
object_id=invoice.id)
for ae in acct_entries:
acct_trans = ae.trans.exclude(
account=acct).exclude(
account__in=accts_ignore)
if acct_trans.exists():
num_trans_updated += acct_trans.count()
acct_trans.update(account=acct)
        print('# acct_tran updated %s' % num_trans_updated)
|
gpl-3.0
| 4,600,237,004,960,355,300
| 42.0625
| 72
| 0.50895
| false
| 5.286445
| false
| false
| false
|
south-coast-science/scs_core
|
src/scs_core/position/nmea/gpgga.py
|
1
|
4215
|
"""
Created on 30 Dec 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
Global positioning system fix data
$xxGGA,time,lat,NS,long,EW,quality,numSV,HDOP,alt,M,sep,M,diffAge,diffStation*cs
example sentence:
$GPGGA,092725.00,4717.11399,N,00833.91590,E,1,08,1.01,499.6,M,48.0,M,,*5B
example values:
GPGGA:{time:GPTime:{time:141058.00}, loc:GPLoc:{lat:5049.38432, ns:N, lng:00007.37801, ew:W}, quality:2, num_sv:06,
hdop:3.10, alt:37.5, sep:45.4, diff_age:None, diff_station:0000}
GPGGA:{time:GPTime:{time:140047.00}, loc:GPLoc:{lat:None, ns:None, lng:None, ew:None}, quality:0, num_sv:00,
hdop:99.99, alt:None, sep:None, diff_age:None, diff_station:None}
https://www.nmea.org
https://en.wikipedia.org/wiki/NMEA_0183
"""
from scs_core.position.nmea.gploc import GPLoc
from scs_core.position.nmea.gptime import GPTime
from scs_core.position.nmea.nmea_sentence import NMEASentence
# --------------------------------------------------------------------------------------------------------------------
class GPGGA(NMEASentence):
"""
classdocs
"""
MESSAGE_IDS = ("$GNGGA", "$GPGGA")
QUALITY_NO_FIX = 0
QUALITY_AUTONOMOUS_GNSS = 1
QUALITY_DIFFERENTIAL_GNSS = 2
QUALITY_ESTIMATED_FIX = 6
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct(cls, r):
if r.message_id not in cls.MESSAGE_IDS:
            raise TypeError("invalid sentence: %s" % r)
time = GPTime(r.str(1))
lat = r.str(2)
ns = r.str(3)
lng = r.str(4)
ew = r.str(5)
loc = GPLoc(lat, ns, lng, ew)
quality = r.int(6)
num_sv = r.int(7)
hdop = r.float(8, 3)
alt = r.float(9, 2)
sep = r.float(11, 2)
diff_age = r.float(13, 3)
diff_station = r.str(14)
return GPGGA(r.message_id, time, loc, quality, num_sv, hdop, alt, sep, diff_age, diff_station)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, message_id, time, loc, quality, num_sv, hdop, alt, sep, diff_age, diff_station):
"""
Constructor
"""
super().__init__(message_id)
self.__time = time # GPTime
self.__loc = loc # GPLoc
self.__quality = quality # int
self.__num_sv = num_sv # int
self.__hdop = hdop # float(2)
self.__alt = alt # float(1) - altitude (metres)
self.__sep = sep # float(1) - geoid separation (metres)
self.__diff_age = diff_age # float(3) - age of differential corrections (seconds)
self.__diff_station = diff_station # string - ID of station providing differential corrections
# ----------------------------------------------------------------------------------------------------------------
@property
def time(self):
return self.__time
@property
def loc(self):
return self.__loc
@property
def quality(self):
return self.__quality
@property
def num_sv(self):
return self.__num_sv
@property
def hdop(self):
return self.__hdop
@property
def alt(self):
return self.__alt
@property
def sep(self):
return self.__sep
@property
def diff_age(self):
return self.__diff_age
@property
def diff_station(self):
return self.__diff_station
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "GPGGA:{source:%s, time:%s, loc:%s, quality:%s, num_sv:%s, hdop:%s, alt:%s, sep:%s, " \
"diff_age:%s, diff_station:%s}" % \
(self.source, self.time, self.loc, self.quality, self.num_sv, self.hdop, self.alt, self.sep,
self.diff_age, self.diff_station)
|
mit
| 7,338,479,952,412,405,000
| 27.869863
| 118
| 0.474259
| false
| 3.53311
| false
| false
| false
|
knights-lab/SHOGUN
|
shogun/utils/tree.py
|
1
|
2735
|
from collections import defaultdict
import csv
class Taxonomy:
def __init__(self, filename: str):
self.tax = self.parse_taxonomy(filename)
@classmethod
def parse_taxonomy(cls, filename: str) -> dict:
with open(filename) as inf:
csv_inf = csv.reader(inf, delimiter='\t')
taxa_map = dict(csv_inf)
return taxa_map
def __call__(self, id: str):
return self.tax[id]
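# Usage sketch (illustrative; the file name is an assumption):
#   taxonomy = Taxonomy('refs.tax')    # two-column, tab-delimited mapping
#   name = taxonomy('some_ref_id')     # taxonomy string for that reference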
class LCATaxonomy:
def __init__(self,
node_id_to_taxa_name: dict,
ref_to_node_id_ix_level: dict,
ref_to_taxa_name: dict,
node_id_to_ancestors
):
self.node_id_to_taxa_name = node_id_to_taxa_name
self.ref_to_node_id_ix_level = ref_to_node_id_ix_level
self.ref_to_taxa_name = ref_to_taxa_name
self.num_nodes = len(self.node_id_to_taxa_name)
self.node_id_to_ancestors = node_id_to_ancestors
TAX_LEVELS = ['k', 'p', 'c', 'o', 'f', 'g', 's', 't']
def tree(): return defaultdict(tree)
def add_tree(t, path):
for node in path.split(';'):
t = t[node]
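# e.g. (illustrative): t = tree(); add_tree(t, 'k__Bacteria;p__Firmicutes')
# creates one nested empty dict per level of the semicolon-separated path.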
def build_tree_from_tax_file(filename: str) -> LCATaxonomy:
with open(filename) as inf:
csv_inf = csv.reader(inf, delimiter='\t')
ref_to_taxa_name = dict(csv_inf)
taxa_name_to_node_id_ix_level = {"root": (0, 0, 0)}
current_node_id = 1
node_id_to_ancestors = [{0}]
for ix, (ref, taxa_name) in enumerate(ref_to_taxa_name.items()):
split = taxa_name.split(";")
ancestors = [0]
for level in range(len(split)):
taxa_name = ";".join(split[:level+1])
if taxa_name in taxa_name_to_node_id_ix_level:
found_node_id, _, _ = taxa_name_to_node_id_ix_level[taxa_name]
# Check if blank level
if len(split[level]) > 3:
ancestors.append(found_node_id)
else:
taxa_name_to_node_id_ix_level[taxa_name] = (current_node_id, ix, level + 1)
# Check if blank level
if len(split[level]) > 3:
ancestors.append(current_node_id)
current_node_id += 1
node_id_to_ancestors.append(set(ancestors))
ref_to_node_id_ix_level = {ref: taxa_name_to_node_id_ix_level[taxa_name] for ref, taxa_name in ref_to_taxa_name.items()}
node_id_to_taxa_name = {node_id: taxa_name for taxa_name, (node_id, ix, level) in taxa_name_to_node_id_ix_level.items()}
return LCATaxonomy(
node_id_to_taxa_name=node_id_to_taxa_name,
ref_to_node_id_ix_level=ref_to_node_id_ix_level,
ref_to_taxa_name=ref_to_taxa_name,
node_id_to_ancestors=node_id_to_ancestors
)
|
agpl-3.0
| -8,318,682,223,261,966,000
| 34.064103
| 124
| 0.564534
| false
| 3.002195
| false
| false
| false
|
ramovsky/lunch
|
tests/test_config.py
|
1
|
1313
|
import unittest
from tempfile import NamedTemporaryFile
from lunch.session import Session, SessionFinished, Config, User
class TestConfig(unittest.TestCase):
def test_add_place(self):
config = Config()
config.add_place('Sisaket')
        self.assertEqual(1, len(config))
        config.add_place('Indian')
        self.assertEqual(2, len(config))
        config.add_place('Sisaket')
        self.assertEqual(2, len(config))
def test_update_place(self):
config = Config()
config.add_place('Sisaket')
self.assertAlmostEqual(.5, config.places['Sisaket'])
config.add_place('Sisaket', .7)
self.assertAlmostEqual(.7, config.places['Sisaket'])
def test_save_load(self):
file = NamedTemporaryFile().name
config = Config(file)
config.add_place('Sisaket')
config.save()
places = config.places
config = Config(file)
config.load()
self.assertEqual(places, config.places)
def test_history_crop(self):
file = NamedTemporaryFile().name
config = Config(file)
for i in range(10):
config.add_winner('Sisaket')
config.save()
config = Config(file)
config.load()
self.assertEqual(7, len(config._history))
|
mit
| -7,776,214,906,230,288,000
| 29.534884
| 64
| 0.621478
| false
| 3.954819
| true
| false
| false
|
jpfairbanks/streaming
|
moving_average.py
|
1
|
2010
|
from __future__ import print_function
"""
This module produces a stream of random variables from a moving average model.
The first command line argument is the number of samples negative means infinite.
The second argument is the window size. The moving average is uniform over the window.
The third argument is the file destination for the data it should be a filename.
the output to the user goes on stderr and the data generated goes onto a variable fp,
which defaults to stdout.
"""
import sys
from time import time
import stream
import random as rand
from stream import chop, repeatcall, item
# handle command line flags
view_len = int(sys.argv[1])
print("num_samples: %d" % view_len, file=sys.stderr)
if view_len < 0:
print("infinite samples", file=sys.stderr)
win_len = int(sys.argv[2])
print("window_length: %d" % win_len, file=sys.stderr)
if len(sys.argv) < 4:
fp = sys.stdout
else:
try:
fp = open(sys.argv[3], 'w')
except IOError:
print("couldn't open file; using stdout")
fp = sys.stdout
print(str(fp), file=sys.stderr)
#define what we need to do moving averages
weights = [1.0/win_len for i in range(win_len)]
def inner(window):
""" Computes the inner product of window and weights.
weights must be defined outside to avoid a useless rezipping
when using this in a stream.
"""
acc = sum((i*w for i,w in zip(window, weights)))
return acc
#get an infinite stream of uniform random floats
zsource = repeatcall(rand.random)
# WIRING
# make our moving average window
winstream = ( zsource >> chop(win_len) )
# compute the windowed average
xstream = ( winstream >> stream.map(inner) )
# EXECUTING
if view_len > 0:
ts = time()
for i in range(view_len):
fp.write(str(next(xstream.iterator))+'\n')
print("time: %f" % (time()-ts), file=sys.stderr)
print("items_per_sec: %f" % (view_len/(time()-ts)), file=sys.stderr)
if view_len < 0:
while True:
fp.write(str(next(xstream.iterator))+'\n')
|
bsd-3-clause
| -7,765,423,684,961,808,000
| 29.923077
| 86
| 0.687562
| false
| 3.378151
| false
| false
| false
|
MarkusHackspacher/unknown-horizons
|
horizons/world/building/settler.py
|
1
|
19857
|
# ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# team@unknown-horizons.org
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
import logging
import math
from horizons.command.building import Build
from horizons.command.production import ToggleActive
from horizons.component.collectingcomponent import CollectingComponent
from horizons.component.storagecomponent import StorageComponent
from horizons.constants import BUILDINGS, GAME, RES, TIER
from horizons.gui.tabs import SettlerOverviewTab
from horizons.messaging import (
AddStatusIcon, RemoveStatusIcon, SettlerInhabitantsChanged, SettlerUpdate,
UpgradePermissionsChanged)
from horizons.scheduler import Scheduler
from horizons.util.pathfinding.pather import StaticPather
from horizons.util.python.callback import Callback
from horizons.world.building.buildable import BuildableRect, BuildableSingle
from horizons.world.building.building import BasicBuilding
from horizons.world.building.buildingresourcehandler import BuildingResourceHandler
from horizons.world.production.producer import Producer
from horizons.world.production.production import SettlerProduction
from horizons.world.status import SettlerNotConnectedStatus, SettlerUnhappyStatus
class SettlerRuin(BasicBuilding, BuildableSingle):
"""Building that appears when a settler got unhappy. The building does nothing.
	NOTE: Inheriting from BuildableSingle is necessary, because it's built via Build Command, which
checks for buildability
"""
buildable_upon = True
walkable = True
class Settler(BuildableRect, BuildingResourceHandler, BasicBuilding):
"""Represents a settlers house, that uses resources and creates inhabitants."""
log = logging.getLogger("world.building.settler")
production_class = SettlerProduction
tabs = (SettlerOverviewTab, )
default_level_on_build = 0
def __init__(self, x, y, owner, instance=None, **kwargs):
kwargs['level'] = self.__class__.default_level_on_build # settlers always start in first level
super().__init__(x=x, y=y, owner=owner, instance=instance, **kwargs)
def __init(self, loading=False, last_tax_payed=0):
self.level_max = TIER.CURRENT_MAX # for now
self._update_level_data(loading=loading, initial=True)
self.last_tax_payed = last_tax_payed
UpgradePermissionsChanged.subscribe(self._on_change_upgrade_permissions, sender=self.settlement)
self._upgrade_production = None # referenced here for quick access
def initialize(self):
super().initialize()
SettlerInhabitantsChanged.broadcast(self, self.inhabitants)
happiness = self.__get_data("happiness_init_value")
if happiness is not None:
self.get_component(StorageComponent).inventory.alter(RES.HAPPINESS, happiness)
if self.has_status_icon:
self.get_component(StorageComponent).inventory.add_change_listener(self._update_status_icon)
# give the user a month (about 30 seconds) to build a main square in range
if self.owner.is_local_player:
Scheduler().add_new_object(self._check_main_square_in_range, self, Scheduler().get_ticks_of_month(), loops=-1)
self.__init()
self.run()
def save(self, db):
super().save(db)
db("INSERT INTO settler(rowid, inhabitants, last_tax_payed) VALUES (?, ?, ?)",
self.worldid, self.inhabitants, self.last_tax_payed)
remaining_ticks = Scheduler().get_remaining_ticks(self, self._tick)
db("INSERT INTO remaining_ticks_of_month(rowid, ticks) VALUES (?, ?)",
self.worldid, remaining_ticks)
def load(self, db, worldid):
super().load(db, worldid)
self.inhabitants, last_tax_payed = \
db("SELECT inhabitants, last_tax_payed FROM settler WHERE rowid=?", worldid)[0]
remaining_ticks = \
db("SELECT ticks FROM remaining_ticks_of_month WHERE rowid=?", worldid)[0][0]
self.__init(loading=True, last_tax_payed=last_tax_payed)
self._load_upgrade_data(db)
SettlerUpdate.broadcast(self, self.level, self.level)
self.run(remaining_ticks)
def _load_upgrade_data(self, db):
"""Load the upgrade production and relevant stored resources"""
upgrade_material_prodline = SettlerUpgradeData.get_production_line_id(self.level + 1)
if not self.get_component(Producer).has_production_line(upgrade_material_prodline):
return
self._upgrade_production = self.get_component(Producer)._get_production(upgrade_material_prodline)
		# re-add the res we already had; they can't be loaded since storage slot limits for
# the special resources aren't saved
resources = {}
for resource, amount in db.get_storage_rowids_by_ownerid(self.worldid):
resources[resource] = amount
for res, amount in self._upgrade_production.get_consumed_resources().items():
# set limits to what we need
self.get_component(StorageComponent).inventory.add_resource_slot(res, abs(amount))
if res in resources:
self.get_component(StorageComponent).inventory.alter(res, resources[res])
self._upgrade_production.add_production_finished_listener(self.level_up)
self.log.debug("%s: Waiting for material to upgrade from %s", self, self.level)
def _add_upgrade_production_line(self):
"""
Add a production line that gets the necessary upgrade material.
When the production finishes, it calls upgrade_materials_collected.
"""
upgrade_material_prodline = SettlerUpgradeData.get_production_line_id(self.level + 1)
self._upgrade_production = self.get_component(
Producer).add_production_by_id(upgrade_material_prodline)
self._upgrade_production.add_production_finished_listener(self.level_up)
# drive the car out of the garage to make space for the building material
for res, amount in self._upgrade_production.get_consumed_resources().items():
self.get_component(StorageComponent).inventory.add_resource_slot(res, abs(amount))
self.log.debug("%s: Waiting for material to upgrade from %s", self, self.level)
def remove(self):
SettlerInhabitantsChanged.broadcast(self, -self.inhabitants)
UpgradePermissionsChanged.unsubscribe(self._on_change_upgrade_permissions, sender=self.settlement)
super().remove()
@property
def upgrade_allowed(self):
return self.session.world.get_settlement(self.position.origin).upgrade_permissions[self.level]
def _on_change_upgrade_permissions(self, message):
production = self._upgrade_production
if production is not None:
if production.is_paused() == self.upgrade_allowed:
ToggleActive(self.get_component(Producer), production).execute(self.session, True)
@property
def happiness(self):
difficulty = self.owner.difficulty
result = int(round(difficulty.extra_happiness_constant + self.get_component(StorageComponent).inventory[RES.HAPPINESS] * difficulty.happiness_multiplier))
return max(0, min(result, self.get_component(StorageComponent).inventory.get_limit(RES.HAPPINESS)))
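	# e.g. (illustrative): with extra_happiness_constant 0 and
	# happiness_multiplier 1, this is simply the stored RES.HAPPINESS value,
	# clamped to [0, inventory limit].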
@property
def capacity_utilization(self):
# this concept does not make sense here, so spare us the calculations
return 1.0
def _update_level_data(self, loading=False, initial=False):
"""Updates all settler-related data because of a level change or as initialization
@param loading: whether called to set data after loading
@param initial: whether called to set data initially
"""
# taxes, inhabitants
self.tax_base = self.session.db.get_settler_tax_income(self.level)
self.inhabitants_max = self.session.db.get_tier_inhabitants_max(self.level)
self.inhabitants_min = self.session.db.get_tier_inhabitants_min(self.level)
#TODO This crops inhabitants at level down, but when can they exceed the limit?
if self.inhabitants > self.inhabitants_max:
self.inhabitants = self.inhabitants_max
# consumption:
# Settler productions are specified to be disabled by default in the db, so we can enable
# them here per level. Production data is save/loaded, so we don't need to do anything in that case
if not loading:
prod_comp = self.get_component(Producer)
current_lines = prod_comp.get_production_lines()
for prod_line in prod_comp.get_production_lines_by_level(self.level):
if not prod_comp.has_production_line(prod_line):
prod_comp.add_production_by_id(prod_line)
# cross out the new lines from the current lines, so only the old ones remain
if prod_line in current_lines:
current_lines.remove(prod_line)
for line in current_lines[:]: # iterate over copy for safe removal
# all lines, that were added here but are not used due to the current level
# NOTE: this contains the upgrade material production line
prod_comp.remove_production_by_id(line)
# Make sure to set _upgrade_production to None in case we are removing it
if self._upgrade_production is not None and line == self._upgrade_production.get_production_line_id():
self._upgrade_production = None
if not initial:
# update instance graphics
			# only do it when something has actually changed
# TODO: this probably also isn't necessary on loading, but it's
			# not touched before the release (2012.1)
self.update_action_set_level(self.level)
def run(self, remaining_ticks=None):
"""Start regular tick calls"""
interval = self.session.timer.get_ticks(GAME.INGAME_TICK_INTERVAL)
run_in = remaining_ticks if remaining_ticks is not None else interval
Scheduler().add_new_object(self._tick, self, run_in=run_in, loops=-1, loop_interval=interval)
def _tick(self):
"""Here we collect the functions, that are called regularly (every "month")."""
self.pay_tax()
self.inhabitant_check()
self.level_check()
def pay_tax(self):
"""Pays the tax for this settler"""
# the money comes from nowhere, settlers seem to have an infinite amount of money.
# see http://wiki.unknown-horizons.org/w/Settler_taxing
# calc taxes http://wiki.unknown-horizons.org/w/Settler_taxing#Formulae
happiness_tax_modifier = 0.5 + (float(self.happiness) / 70.0)
inhabitants_tax_modifier = float(self.inhabitants) / self.inhabitants_max
taxes = self.tax_base * self.settlement.tax_settings[self.level] * happiness_tax_modifier * inhabitants_tax_modifier
real_taxes = int(round(taxes * self.owner.difficulty.tax_multiplier))
self.settlement.owner.get_component(StorageComponent).inventory.alter(RES.GOLD, real_taxes)
self.last_tax_payed = real_taxes
# decrease happiness http://wiki.unknown-horizons.org/w/Settler_taxing#Formulae
difference = 1.0 - self.settlement.tax_settings[self.level]
happiness_decrease = 10 * difference - 6 * abs(difference)
happiness_decrease = int(round(happiness_decrease))
# NOTE: this formula was actually designed for a different use case, where the happiness
# is calculated from the number of available goods -/+ a certain tax factor.
# to simulate the more dynamic, currently implemented approach (where every event changes
# the happiness), we simulate discontent of taxes by this:
happiness_decrease -= 6
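		# worked example (illustrative): tax factor 1.5 -> difference -0.5 ->
		# 10*(-0.5) - 6*0.5 = -8, minus 6 => happiness drops by 14; tax factor
		# 0.5 -> 10*0.5 - 6*0.5 = 2, minus 6 => happiness still drops by 4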
self.get_component(StorageComponent).inventory.alter(RES.HAPPINESS, happiness_decrease)
self._changed()
self.log.debug("%s: pays %s taxes, -happy: %s new happiness: %s", self, real_taxes,
happiness_decrease, self.happiness)
def inhabitant_check(self):
"""Checks whether or not the population of this settler should increase or decrease"""
sad = self.session.db.get_lower_happiness_limit()
happy = self.session.db.get_upper_happiness_limit()
change = 0
if self.happiness > happy and self.inhabitants < self.inhabitants_max:
change = 1
self.log.debug("%s: inhabitants increase to %s", self, self.inhabitants)
elif self.happiness < sad and self.inhabitants > 1:
change = -1
self.log.debug("%s: inhabitants decrease to %s", self, self.inhabitants)
if change != 0:
# see http://wiki.unknown-horizons.org/w/Supply_citizens_with_resources
self.get_component(Producer).alter_production_time(
6.0 / 7.0 * math.log(1.5 * (self.inhabitants + 1.2)))
self.inhabitants += change
SettlerInhabitantsChanged.broadcast(self, change)
self._changed()
def can_level_up(self):
return self.happiness > self.__get_data("happiness_level_up_requirement") and \
self.inhabitants >= self.inhabitants_min and not self._has_disaster()
def level_check(self):
"""Checks whether we should level up or down.
		Ignores buildings with an active disaster."""
if self.can_level_up():
if self.level >= self.level_max:
# max level reached already, can't allow an update
if self.owner.max_tier_notification < self.level_max:
if self.owner.is_local_player:
self.session.ingame_gui.message_widget.add(
point=self.position.center, string_id='MAX_TIER_REACHED')
self.owner.max_tier_notification = self.level_max
return
if self._upgrade_production:
return # already waiting for res
self._add_upgrade_production_line()
if not self.upgrade_allowed:
ToggleActive(self.get_component(Producer), self._upgrade_production).execute(self.session, True)
elif self.happiness < self.__get_data("happiness_level_down_limit") or \
self.inhabitants < self.inhabitants_min:
self.level_down()
self._changed()
def level_up(self, production=None):
"""Actually level up (usually called when the upgrade material has arrived)"""
self._upgrade_production = None
# just level up later that tick, it could disturb other code higher in the call stack
def _do_level_up():
self.level += 1
self.log.debug("%s: Levelling up to %s", self, self.level)
self._update_level_data()
# update the level of our inhabitants so graphics can change
if self.has_component(CollectingComponent):
for collector in self.get_component(CollectingComponent).get_local_collectors():
collector.level_upgrade(self.level)
# Notify the world about the level up
SettlerUpdate.broadcast(self, self.level, 1)
# reset happiness value for new level
new_happiness = self.__get_data("happiness_init_value") - self.happiness
self.get_component(StorageComponent).inventory.alter(RES.HAPPINESS, new_happiness)
self._changed()
Scheduler().add_new_object(_do_level_up, self, run_in=0)
def level_down(self):
if self.level == TIER.LOWEST:
# Can't level down any more.
self.make_ruin()
self.log.debug("%s: Destroyed by lack of happiness", self)
if self.owner.is_local_player:
# check_duplicate: only trigger once for different settlers of a neighborhood
self.session.ingame_gui.message_widget.add(point=self.position.center,
string_id='SETTLERS_MOVED_OUT', check_duplicate=True)
else:
self.level -= 1
self._update_level_data()
# reset happiness value for new level
new_happiness = self.__get_data("happiness_init_value") - self.happiness
self.get_component(StorageComponent).inventory.alter(RES.HAPPINESS, new_happiness)
self.log.debug("%s: Level down to %s", self, self.level)
self._changed()
# update the level of our inhabitants so graphics can change
if self.has_component(CollectingComponent):
for collector in self.get_component(CollectingComponent).get_local_collectors():
collector.level_upgrade(self.level)
# Notify the world about the level down
SettlerUpdate.broadcast(self, self.level, -1)
def make_ruin(self):
""" Replaces itself with a ruin.
"""
command = Build(BUILDINGS.SETTLER_RUIN, self.position.origin.x,
self.position.origin.y, island=self.island, settlement=self.settlement)
# Remove the building and then place the Ruin
Scheduler().add_new_object(Callback.ChainedCallbacks(
self.remove, Callback(command, self.owner)), self, run_in=0)
def _has_disaster(self):
return hasattr(self, "disaster") and self.disaster
def _check_main_square_in_range(self):
"""Notifies the user via a message in case there is no main square in range"""
if not self.owner.is_local_player:
return # only check this for local player
for building in self.get_buildings_in_range():
if building.id == BUILDINGS.MAIN_SQUARE:
if StaticPather.get_path_on_roads(self.island, self, building) is not None:
# a main square is in range
if hasattr(self, "_main_square_status_icon"):
RemoveStatusIcon.broadcast(self, self, SettlerNotConnectedStatus)
del self._main_square_status_icon
return
if not hasattr(self, "_main_square_status_icon"):
self._main_square_status_icon = SettlerNotConnectedStatus(self) # save ref for removal later
AddStatusIcon.broadcast(self, self._main_square_status_icon)
# no main square found
# check_duplicate: only trigger once for different settlers of a neighborhood
self.session.ingame_gui.message_widget.add(point=self.position.origin,
string_id='NO_MAIN_SQUARE_IN_RANGE', check_duplicate=True)
def level_upgrade(self, lvl):
"""Settlers only level up by themselves"""
pass
def _update_status_icon(self):
if self.has_status_icon:
unhappy = self.happiness < self.__get_data("happiness_inhabitants_decrease_limit")
# check for changes
if unhappy and not hasattr(self, "_settler_status_icon"):
self._settler_status_icon = SettlerUnhappyStatus(self) # save ref for removal later
AddStatusIcon.broadcast(self, self._settler_status_icon)
if not unhappy and hasattr(self, "_settler_status_icon"):
RemoveStatusIcon.broadcast(self, self, SettlerUnhappyStatus)
del self._settler_status_icon
def __str__(self):
try:
return "{}(l:{};ihab:{};hap:{})".format(
super().__str__(), self.level,
self.inhabitants, self.happiness)
except AttributeError: # an attribute hasn't been set up
return super().__str__()
#@decorators.cachedmethod TODO: replace this with a version that doesn't leak
def __get_data(self, key):
"""Returns constant settler-related data from the db.
The values are cached by python, so the underlying data must not change."""
return int(
self.session.db("SELECT value FROM balance_values WHERE name = ?", key)[0][0]
)
class SettlerUpgradeData:
"""This is used as glue between the old upgrade system based on sqlite data used in a non-component environment
and the current component version with data in yaml"""
# basically, this is arbitrary as long as it's not the same as any of the regular
# production lines of the settler. We reuse data that has arbitrarily been set earlier
# to preserve savegame compatibility.
production_line_ids = {1: 24, 2: 35, 3: 23451, 4: 34512, 5: 45123}
def __init__(self, producer_component, upgrade_material_data):
self.upgrade_material_data = upgrade_material_data
def get_production_lines(self):
d = {}
for level, prod_line_id in self.__class__.production_line_ids.items():
d[prod_line_id] = self.get_production_line_data(level)
return d
def get_production_line_data(self, level):
"""Returns production line data for the upgrade to this level"""
prod_line_data = {'time': 1,
'changes_animation': 0,
'enabled_by_default': False,
'save_statistics': False,
'consumes': self.upgrade_material_data[level]}
return prod_line_data
@classmethod
def get_production_line_id(cls, level):
"""Returns production line id for the upgrade to this level"""
return cls.production_line_ids[level]
|
gpl-2.0
| -3,310,390,831,025,852,400
| 42.737885
| 156
| 0.733646
| false
| 3.315025
| false
| false
| false
|
petry/django-press
|
press/migrations/0008_auto__add_field_author_photo.py
|
1
|
9023
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Author.photo'
db.add_column(u'press_author', 'photo',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['photologue.Photo'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Author.photo'
db.delete_column(u'press_author', 'photo_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'photologue.photo': {
'Meta': {'ordering': "['-date_added']", 'object_name': 'Photo'},
'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_related'", 'null': 'True', 'to': u"orm['photologue.PhotoEffect']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'title_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
u'press.article': {
'Meta': {'ordering': "['modified_date']", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['press.Author']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 2, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 2, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'draft'", 'unique': 'True', 'null': 'True', 'to': u"orm['press.Article']"}),
'publish_state': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['press.Section']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'press.author': {
'Meta': {'ordering': "['user__first_name', 'user__last_name']", 'object_name': 'Author'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['photologue.Photo']", 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'press.section': {
'Meta': {'object_name': 'Section'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['press']
|
bsd-3-clause
| 4,100,511,375,027,724,300
| 74.2
| 187
| 0.548155
| false
| 3.613536
| false
| false
| false
|
wtbarnes/synthesizAR
|
synthesizAR/analysis/eis.py
|
1
|
9639
|
"""
Helpers for analyzing synthetic EIS data
"""
import os
import numpy as np
from sunpy.util.metadata import MetaDict
from sunpy.map import Map
from sunpy.io.fits import get_header
from sunpy.visualization.colormaps.cm import hinodexrt
import astropy.units as u
import astropy.io.fits
import h5py
__all__ = ['EISCube']
class EISCube(object):
"""
Spectral and spatial cube for holding Hinode EIS data
"""
def __init__(self, *args, **kwargs):
if len(args) == 1 and os.path.exists(args[0]):
data, header, wavelength = self._restore_from_file(args[0], **kwargs)
elif all([k in kwargs for k in ['data', 'header', 'wavelength']]):
data = kwargs.get('data')
header = kwargs.get('header')
wavelength = kwargs.get('wavelength')
else:
raise ValueError('''EISCube can only be initialized with a valid FITS file or NumPy
array with an associated wavelength and header.''')
# check dimensions
if data.shape[-1] != wavelength.shape[0]:
raise ValueError('''Third dimension of data cube must have the same length as
wavelength.''')
self.meta = header.copy()
self.wavelength = wavelength
self.data = data
self.cmap = kwargs.get('cmap', hinodexrt)
self._fix_header()
def __repr__(self):
return f'''synthesizAR {type(self).__name__}
-----------------------------------------
Telescope : {self.meta['telescop']}
Instrument : {self.meta['instrume']}
Area : x={self[0].xrange}, y={self[0].yrange}
Dimension : {u.Quantity(self[0].dimensions)}
Scale : {u.Quantity(self[0].scale)}
Wavelength range : {u.Quantity([self.wavelength[0], self.wavelength[-1]])}
Wavelength dimension : {len(self.wavelength)}'''
def __getitem__(self, key):
"""
Overriding indexing. If key is just one index, returns a normal `Map` object. Otherwise,
another `EISCube` object is returned.
"""
if type(self.wavelength[key].value) == np.ndarray and len(self.wavelength[key].value) > 1:
new_meta = self.meta.copy()
new_meta['wavelnth'] = (self.wavelength[key][0].value+self.wavelength[key][-1].value)/2.
return EISCube(data=self.data[:, :, key], header=new_meta,
wavelength=self.wavelength[key])
else:
meta_map2d = self.meta.copy()
meta_map2d['naxis'] = 2
for k in ['naxis3', 'ctype3', 'cunit3', 'cdelt3']:
del meta_map2d[k]
meta_map2d['wavelnth'] = self.wavelength[key].value
tmp_map = Map(self.data[:, :, key], meta_map2d)
tmp_map.plot_settings.update({'cmap': self.cmap})
return tmp_map
def submap(self, bottom_left_corner, top_right_corner):
"""
Crop to spatial area designated by corners
.. warning:: It is faster to crop in wavelength space first and then crop in
coordinate space.
"""
# call submap on each slice in wavelength
new_data = []
for i in range(self.wavelength.shape[0]):
new_data.append(self[i].submap(bottom_left_corner, top_right_corner).data)
new_data = np.stack(new_data, axis=2)*self.data.unit
# fix metadata
new_meta = self[0].submap(bottom_left_corner, top_right_corner).meta.copy()
for key in ['wavelnth', 'naxis3', 'ctype3', 'cunit3', 'cdelt3']:
new_meta[key] = self.meta[key]
return EISCube(data=new_data, header=new_meta, wavelength=self.wavelength)
def __add__(self, x):
"""
Allow EISCubes to be added together
"""
if isinstance(x, EISCube):
assert np.all(self.wavelength == x.wavelength), 'Wavelength ranges must be equal in order to add EISCubes'
key_checks = ['cdelt1', 'cdelt2', 'crpix1', 'crpix2', 'ctype1', 'ctype2', 'crval1',
'crval2']
for k in key_checks:
assert self.meta[k] == x.meta[k], f'{k} keys in metadata do not match'
data = self.data + x.data
else:
# if x is not an instance of EISCube, let numpy/astropy decide whether it can
# be added to the data attribute, e.g. a scalar or some 3D array with
# appropriate units
data = self.data + x
return EISCube(data=data, header=self.meta.copy(), wavelength=self.wavelength)
def __radd__(self, x):
"""
Define reverse addition in the same way as addition.
"""
return self.__add__(x)
def __mul__(self, x):
"""
Allow for multiplication of data in the cube.
"""
x = u.Quantity(x)
data = self.data*x
header = self.meta.copy()
header['bunit'] = (data.unit).to_string()
return EISCube(data=data, header=header, wavelength=self.wavelength)
def __rmul__(self, x):
"""
Define reverse multiplication in the same way as multiplication.
"""
return self.__mul__(x)
def _fix_header(self):
"""
Set any missing keys, reset any broken ones
"""
# assuming y is rows, x is columns
self.meta['naxis1'] = self.data.shape[1]
self.meta['naxis2'] = self.data.shape[0]
self.meta['naxis3'] = self.wavelength.shape[0]
def save(self, filename, use_fits=False, **kwargs):
"""
Save to FITS or HDF5 file. Default is HDF5 because this is faster and produces smaller
files.
"""
if use_fits:
self._save_to_fits(filename, **kwargs)
else:
# change extension for clarity
filename = '.'.join([os.path.splitext(filename)[0], 'h5'])
self._save_to_hdf5(filename, **kwargs)
def _save_to_hdf5(self, filename, **kwargs):
"""
Save to HDF5 file.
"""
dset_save_kwargs = kwargs.get(
'hdf5_save_params', {'compression': 'gzip', 'dtype': np.float32})
with h5py.File(filename, 'x') as hf:
meta_group = hf.create_group('meta')
for key in self.meta:
meta_group.attrs[key] = self.meta[key]
dset_wvl = hf.create_dataset('wavelength', data=self.wavelength.value)
dset_wvl.attrs['unit'] = self.wavelength.unit.to_string()
dset_intensity = hf.create_dataset('intensity', data=self.data, **dset_save_kwargs)
dset_intensity.attrs['unit'] = self.data.unit.to_string()
def _save_to_fits(self, filename, **kwargs):
"""
Save to FITS file
"""
# sanitize header
header = self.meta.copy()
if 'keycomments' in header:
del header['keycomments']
# create table to hold wavelength array
table_hdu = astropy.io.fits.BinTableHDU.from_columns(
[astropy.io.fits.Column(name='wavelength',
format='D',
unit=self.wavelength.unit.to_string(),
array=self.wavelength.value)])
# create image to hold 3D array
image_hdu = astropy.io.fits.PrimaryHDU(np.swapaxes(self.data.value.T, 1, 2),
header=astropy.io.fits.Header(header))
# write to file
hdulist = astropy.io.fits.HDUList([image_hdu, table_hdu])
hdulist.writeto(filename, output_verify='silentfix')
def _restore_from_file(self, filename, **kwargs):
"""
Load from HDF5 or FITS file
"""
use_fits = kwargs.get('use_fits', os.path.splitext(filename)[-1] == '.fits')
use_hdf5 = kwargs.get('use_hdf5', os.path.splitext(filename)[-1] == '.h5')
if use_fits:
data, header, wavelength = self._restore_from_fits(filename)
elif use_hdf5:
data, header, wavelength = self._restore_from_hdf5(filename)
else:
raise ValueError('Cube can only be initialized with a FITS or HDF5 file.')
return data, header, wavelength
def _restore_from_hdf5(self, filename):
"""
Helper to load cube from HDF5 file
"""
header = MetaDict()
with h5py.File(filename, 'r') as hf:
for key in hf['meta'].attrs:
header[key] = hf['meta'].attrs[key]
wavelength = u.Quantity(hf['wavelength'],
get_keys(hf['wavelength'].attrs, ('unit', 'units')))
data = u.Quantity(hf['intensity'], get_keys(hf['intensity'].attrs, ('unit', 'units')))
return data, header, wavelength
def _restore_from_fits(self, filename):
"""
Helper to load cube from FITS file
"""
tmp = astropy.io.fits.open(filename)
header = MetaDict(get_header(tmp)[0])
data = tmp[0].data*u.Unit(header['bunit'])
wavelength = tmp[1].data.field(0)*u.Unit(tmp[1].header['TUNIT1'])
tmp.close()
return np.swapaxes(data.T, 0, 1), header, wavelength
@property
def integrated_intensity(self):
"""
Map of the intensity integrated over wavelength.
"""
tmp = np.dot(self.data, np.gradient(self.wavelength.value))
tmp_meta = self[0].meta.copy()
tmp_meta['wavelnth'] = self.meta['wavelnth']
tmp_meta['bunit'] = (u.Unit(self.meta['bunit'])*self.wavelength.unit).to_string()
tmp_map = Map(tmp, tmp_meta)
tmp_map.plot_settings.update({'cmap': self.cmap})
return tmp_map
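# Usage sketch (illustrative; the file name and indices are assumptions):
#   cube = EISCube('eis_cube.h5')        # restore from HDF5 (or FITS)
#   line_map = cube[10]                  # one wavelength -> a sunpy Map
#   window = cube[5:15]                  # a wavelength slice -> an EISCube
#   total = cube.integrated_intensity    # wavelength-integrated Map
# NOTE: get_keys() in _restore_from_hdf5 is assumed to be provided elsewhere
# in the package; it is not imported in this file.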
|
gpl-3.0
| 2,225,193,771,007,478,800
| 38.504098
| 118
| 0.5651
| false
| 3.672
| false
| false
| false
|
t3dev/odoo
|
addons/website_slides/tests/test_statistics.py
|
1
|
5815
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import math
from odoo.addons.website_slides.tests import common
from odoo.exceptions import AccessError, UserError
from odoo.tests import tagged
from odoo.tests.common import users
from odoo.tools import mute_logger, float_compare
@tagged('functional')
class TestStatistics(common.SlidesCase):
def setUp(self):
super(TestStatistics, self).setUp()
self.slide_2 = self.env['slide.slide'].sudo(self.user_publisher).create({
'name': 'How To Cook For Humans',
'channel_id': self.channel.id,
'slide_type': 'presentation',
'website_published': True,
'completion_time': 3.0,
})
self.slide_3 = self.env['slide.slide'].sudo(self.user_publisher).create({
'name': 'How To Cook Humans For Humans',
'channel_id': self.channel.id,
'slide_type': 'document',
'website_published': True,
'completion_time': 1.5,
})
@mute_logger('odoo.models')
def test_channel_statistics(self):
channel_publisher = self.channel.sudo(self.user_publisher)
# slide type computation
self.assertEqual(channel_publisher.total_slides, len(channel_publisher.slide_ids))
self.assertEqual(channel_publisher.nbr_infographic, len(channel_publisher.slide_ids.filtered(lambda s: s.slide_type == 'infographic')))
self.assertEqual(channel_publisher.nbr_presentation, len(channel_publisher.slide_ids.filtered(lambda s: s.slide_type == 'presentation')))
self.assertEqual(channel_publisher.nbr_document, len(channel_publisher.slide_ids.filtered(lambda s: s.slide_type == 'document')))
self.assertEqual(channel_publisher.nbr_video, len(channel_publisher.slide_ids.filtered(lambda s: s.slide_type == 'video')))
# slide statistics computation
self.assertEqual(float_compare(channel_publisher.total_time, sum(s.completion_time for s in channel_publisher.slide_ids), 3), 0)
# members computation
self.assertEqual(channel_publisher.members_count, 1)
channel_publisher.action_add_member()
self.assertEqual(channel_publisher.members_count, 1)
channel_publisher._action_add_members(self.user_emp.partner_id)
self.assertEqual(channel_publisher.members_count, 2)
self.assertEqual(channel_publisher.partner_ids, self.user_publisher.partner_id | self.user_emp.partner_id)
@mute_logger('odoo.models')
def test_channel_user_statistics(self):
channel_publisher = self.channel.sudo(self.user_publisher)
channel_publisher.write({
'enroll': 'invite',
})
channel_publisher._action_add_members(self.user_emp.partner_id)
channel_emp = self.channel.sudo(self.user_emp)
slides_emp = (self.slide | self.slide_2).sudo(self.user_emp)
slides_emp.action_set_viewed()
self.assertEqual(channel_emp.completion, 0)
slides_emp.action_set_completed()
channel_emp.invalidate_cache()
self.assertEqual(
channel_emp.completion,
math.ceil(100.0 * len(slides_emp) / len(channel_publisher.slide_ids)))
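        # e.g. 2 of the 3 course slides completed -> ceil(100.0 * 2 / 3) = 67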
self.assertFalse(channel_emp.completed)
self.slide_3.sudo(self.user_emp).action_set_completed()
self.assertEqual(channel_emp.completion, 100)
self.assertTrue(channel_emp.completed)
@mute_logger('odoo.models')
def test_channel_user_statistics_complete_check_member(self):
(self.slide | self.slide_2).write({'is_preview': True})
slides_emp = (self.slide | self.slide_2).sudo(self.user_emp)
slides_emp.read(['name'])
with self.assertRaises(UserError):
slides_emp.action_set_completed()
@mute_logger('odoo.models')
def test_channel_user_statistics_view_check_member(self):
(self.slide | self.slide_2).write({'is_preview': True})
slides_emp = (self.slide | self.slide_2).sudo(self.user_emp)
slides_emp.read(['name'])
with self.assertRaises(UserError):
slides_emp.action_set_viewed()
def test_slide_user_statistics(self):
channel_publisher = self.channel.sudo(self.user_publisher)
channel_publisher._action_add_members(self.user_emp.partner_id)
slide_emp = self.slide.sudo(self.user_emp)
self.assertEqual(slide_emp.likes, 0)
self.assertEqual(slide_emp.dislikes, 0)
self.assertEqual(slide_emp.user_vote, 0)
slide_emp.action_like()
self.assertEqual(slide_emp.likes, 1)
self.assertEqual(slide_emp.dislikes, 0)
self.assertEqual(slide_emp.user_vote, 1)
slide_emp.action_dislike()
self.assertEqual(slide_emp.likes, 0)
self.assertEqual(slide_emp.dislikes, 0)
self.assertEqual(slide_emp.user_vote, 0)
slide_emp.action_dislike()
self.assertEqual(slide_emp.likes, 0)
self.assertEqual(slide_emp.dislikes, 1)
self.assertEqual(slide_emp.user_vote, -1)
def test_slide_statistics(self):
channel_publisher = self.channel.sudo(self.user_publisher)
channel_publisher._action_add_members(self.user_emp.partner_id)
self.assertEqual(self.slide.slide_views, 0)
self.assertEqual(self.slide.public_views, 0)
self.slide.write({'public_views': 4})
self.assertEqual(self.slide.slide_views, 0)
self.assertEqual(self.slide.public_views, 4)
self.assertEqual(self.slide.total_views, 4)
slide_emp = self.slide.sudo(self.user_emp)
slide_emp.action_set_viewed()
self.assertEqual(slide_emp.slide_views, 1)
self.assertEqual(slide_emp.public_views, 4)
self.assertEqual(slide_emp.total_views, 5)
|
gpl-3.0
| 6,279,237,426,971,799,000
| 42.721805
| 145
| 0.663629
| false
| 3.625312
| true
| false
| false
|
gonzalolarralde/FakeARMVE
|
fake_firmware/messages.py
|
1
|
8599
|
# -*- coding: utf-8 -*-
import platform
import struct
from construct import Container
from construct.core import FieldError
from datetime import datetime
from time import sleep
from zlib import crc32
from constants import *
from structs import *
from helpers import *
MESSAGE_HANDLERS = []
DEBUG = True
def prepare_container(device, command, data="", msg_type=MSG_COMMAND):
return Container(
version=PROTOCOL_VERSION_1,
device=device,
msg_type=msg_type,
size=7 + len(data),
command=command,
data=data)
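# e.g. (illustrative): prepare_container(DEV_RFID, CMD_RFID_GET_TAGS) builds a
# payload-less command frame; `size` counts the 7 header bytes plus the data.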
def debug_msg(*args):
if DEBUG:
print(args)
def message_handler(device, commands):
def message_handler_call(f):
MESSAGE_HANDLERS.append((device, commands, f))
return f
return message_handler_call
def handler_for(device, command):
for handler in MESSAGE_HANDLERS:
valid_commands = handler[1] if isinstance(handler[1], list) else [handler[1]]
if handler[0] == device and command in valid_commands:
return handler[2]
return None
def process_message(common_data, client):
if common_data.msg_type != MSG_ERROR:
print "date", datetime.now()
print "msg_type", common_data.msg_type
print "device", common_data.device
print "command", common_data.command
print "data", string_to_array(common_data.data)
possible_handler = handler_for(common_data.device, common_data.command)
if possible_handler is not None:
possible_handler(common_data, client)
else:
print "command name", "UNKNOWN"
print "-------------"
else:
print "err", common_data.command
print "data", string_to_array(common_data.data)
print "-------------"
# --------------------- #
def send_initialize_ok(client):
res = Container(
response_code=INIT_RESPONSE_OK,
protocol_size=1,
protocols=[1],
model=string_to_array("RogelitoEV "),
serial_number=string_to_array("12345678"),
build=string_to_array("123"),
watchdog=0,
free_ram=65535,
free_print_mem=65535,
free_page_mem=65535,
machine_type=1)
cmd = prepare_container(DEV_AGENT, CMD_AGENT_INIT, struct_initialize_ok.build(res))
client.send(cmd)
def send_tags_list(tags, client, as_event = False):
res = Container(
number = len(tags),
serial_number = [string_to_array(x) for x in tags],
reception_level = [[100] for x in tags])
cmd = prepare_container(DEV_RFID, CMD_RFID_GET_TAGS if not as_event else EVT_RFID_NEW_TAG, \
struct_tags_list.build(res), MSG_COMMAND if not as_event else MSG_EV_PUB)
client.send(cmd)
def send_block_data(tag, block_from, block_qty, multi_block, client):
blocks_to_send = [Container(bytes=x) for x in tag["blocks"][block_from:block_qty+1]] # OOPS, for some reason the service expects one additional block to be sent, to compute CRC
if not multi_block:
cmd = prepare_container(DEV_RFID, CMD_RFID_READ_BLOCK, struct_rfid_block.build (blocks_to_send[0]))
else:
cmd = prepare_container(DEV_RFID, CMD_RFID_READ_BLOCKS, struct_rfid_blocks.build (blocks_to_send))
client.send(cmd)
def send_printer_status(paper_out_1, paper_out_2, lever_open, msg_type, command, client):
cmd = prepare_container(DEV_PRINTER, command, \
struct_printer_get_status.build(Container(paper_out_1=paper_out_1, paper_out_2=paper_out_2, lever_open=lever_open)), \
msg_type)
client.send(cmd)
def send_paper_remove(client):
cmd = prepare_container(DEV_PRINTER, CMD_PRINTER_PAPER_REMOVE, "", MSG_EV_PUB)
client.send(cmd)
# --------------------- #
@message_handler(DEV_AGENT, CMD_AGENT_INIT)
def _(common_data, client):
print "command name", "AAAA CMD_AGENT_INIT"
send_initialize_ok(client)
@message_handler(DEV_PRINTER, CMD_PRINTER_GET_STATUS)
def _(common_data, client):
print "command name", "CMD_PRINTER_GET_STATUS"
current_printer_status = client.current_printer_status
send_printer_status(current_printer_status[0], current_printer_status[1], current_printer_status[2], \
MSG_COMMAND, CMD_PRINTER_GET_STATUS, client)
@message_handler(DEV_RFID, CMD_RFID_GET_TAGS)
def _(common_data, client):
print "command name", "CMD_RFID_GET_TAGS"
send_tags_list(client.current_tags, client)
@message_handler(DEV_RFID, CMD_RFID_READ_BLOCK)
def _(common_data, client):
print "command name", "CMD_RFID_READ_BLOCK"
x = struct_read_block.parse(common_data.data)
print "serial_number", x.serial_number
print "block", x.block
send_block_data(client.get_tag(array_to_string(x.serial_number)), x.block, 1, False, client)
@message_handler(DEV_RFID, CMD_RFID_READ_BLOCKS)
def _(common_data, client):
print "command name", "CMD_RFID_READ_BLOCKS"
x = struct_read_blocks.parse(common_data.data)
print "serial_number", x.serial_number
print "block", x.block
print "number", x.number
# ToDo: Fix - For some reason I'm reading a block less than the number sent by the service
send_block_data(client.get_tag(array_to_string(x.serial_number)), x.block, x.number+1, True, client)
@message_handler(DEV_PRINTER, CMD_PRINTER_PAPER_REMOVE)
def _(common_data, client):
print "command name", "CMD_PRINTER_PAPER_REMOVE"
client.current_printer_status = [0,0,0]
send_printer_status(client.current_printer_status[0], client.current_printer_status[1], client.current_printer_status[2], \
MSG_COMMAND, CMD_PRINTER_PAPER_REMOVE, client)
client.printer_ejected()
@message_handler(DEV_RFID, CMD_RFID_WRITE_BLOCK)
def _(common_data, client):
print "command name", "CMD_RFID_WRITE_BLOCK"
x = struct_write_block.parse(common_data.data)
print "serial_number", array_to_string(x.serial_number)
print "block", x.block
print "bytes", x.rfid_block.bytes
client.write_tag(array_to_string(x.serial_number), x.block, [x.rfid_block.bytes])
client.send(prepare_container(common_data.device, common_data.command))
@message_handler(DEV_RFID, CMD_RFID_WRITE_BLOCKS)
def _(common_data, client):
print "command name", "CMD_RFID_WRITE_BLOCKS"
x = struct_write_blocks.parse(common_data.data)
print "serial_number", array_to_string(x.serial_number)
print "block", x.block
print "number", x.number
print "bytes", [i.bytes for i in x.rfid_block]
client.write_tag(array_to_string(x.serial_number), x.block, [i.bytes for i in x.rfid_block])
client.send(prepare_container(common_data.device, common_data.command))
@message_handler(DEV_RFID, CMD_RFID_SET_RO_BLOCK)
def _(common_data, client):
print "command name", "CMD_RFID_SET_RO_BLOCK"
x = struct_read_block.parse(common_data.data)
print "serial_number", array_to_string(x.serial_number)
print "block", x.block
client.mark_tag_ro_blocks(array_to_string(x.serial_number), x.block, 1)
@message_handler(DEV_RFID, CMD_RFID_SET_RO_BLOCKS)
def _(common_data, client):
print "command name", "CMD_RFID_SET_RO_BLOCKS"
x = struct_read_blocks.parse(common_data.data)
print "serial_number", array_to_string(x.serial_number)
print "block", x.block
print "number", x.number
client.mark_tag_ro_blocks(array_to_string(x.serial_number), x.block, x.number)
@message_handler(DEV_RFID, CMD_RFID_IS_READONLY)
def _(common_data, client):
print "command name", "CMD_RFID_IS_READONLY"
x = struct_read_blocks.parse(common_data.data)
print "serial_number", array_to_string(x.serial_number)
print "block", x.block
print "number", x.number
    ro_blocks = client.get_tag(array_to_string(x.serial_number))["ro_blocks"]
    security_data = struct_security_status.build(Container(byte=[1 if b in ro_blocks else 0 for b in range(x.block, x.number)]))
client.send(prepare_container(common_data.device, common_data.command, security_data))
@message_handler(DEV_PRINTER, CMD_PRINTER_CLEAR_BUFFER)
def _(common_data, client):
client.reset_printer_buffer()
@message_handler(DEV_PRINTER, [CMD_PRINTER_LOAD_COMP_BUFFER, CMD_PRINTER_LOAD_BUFFER])
def _(common_data, client):
x = struct_print_buffer.parse(common_data.data)
if x.clear_buffer > 0:
client.reset_printer_buffer()
# print len(data), len(x.stream), size, x.size
stream_data = x.stream
    if common_data.command == CMD_PRINTER_LOAD_COMP_BUFFER: # Expand the data if it is compressed
stream_data = expand_printer_data(stream_data)
client.add_data_to_printer_buffer(stream_data)
if x.do_print > 0:
client.do_print()
|
agpl-3.0
| 1,663,717,867,268,342,800
| 34.979079
| 180
| 0.674148
| false
| 3.193093
| false
| false
| false
|
justas-/pyledbat
|
pyledbat/ledbat/baseledbat.py
|
1
|
6716
|
"""
Copyright 2017, J. Poderys, Technical University of Denmark
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This is a base implementation of LEDBAT following the [RFC6817] for LEDBAT
specification. This file is not enough on its own, and must be extended to
gate the sending. An example of such an extension is provided by the
simpleledbat implementation and by the test application.
"""
import time
import datetime
import math
import logging
class BaseLedbat(object):
"""Base class with constante defined"""
CURRENT_FILTER = 8 # Number of elements in current delay filter
BASE_HISTORY = 10 # Number of elements in base delay history
INIT_CWND = 2 # Number of MSSes in initial cwnd value
MSS = 1500 # Maximum segment size
TARGET = 50 # Target in milliseconds. Per [RFC6817] must be <= 100ms
GAIN = 1 # Congestion window to delay response rate
ALLOWED_INCREASE = 1
MIN_CWND = 2
def __init__(self, **kwargs):
"""Initialize the instance"""
self._current_delays = BaseLedbat.CURRENT_FILTER * [1000000]
self._base_delays = BaseLedbat.BASE_HISTORY * [float('inf')]
self._flightsize = 0
self._cwnd = BaseLedbat.INIT_CWND * BaseLedbat.MSS # Congestion window
        self._last_rollover = time.time() # Time last base-delay rollover occurred
self._cto = 1 # Congestion timeout (seconds)
self._queuing_delay = 0
self._rtt = None # Round Trip Time
self._last_data_loss = 0 # When was latest dataloss event observed
self._last_ack_received = None # When was the last ACK received
# Change defaults if given:
for key, value in kwargs.items():
if key == 'set_current_filter':
BaseLedbat.CURRENT_FILTER = value
elif key == 'set_base_history':
BaseLedbat.BASE_HISTORY = value
elif key == 'set_init_cwnd':
BaseLedbat.INIT_CWND = value
elif key == 'set_mss':
BaseLedbat.MSS = value
elif key == 'set_target':
BaseLedbat.TARGET = value
elif key == 'set_gain':
BaseLedbat.GAIN = value
elif key == 'set_allowed_increase':
BaseLedbat.ALLOWED_INCREASE = value
elif key == 'set_min_cwnd':
BaseLedbat.MIN_CWND = value
else:
# Fall through option so logging is not done
continue
logging.info('LEDBAT parameter changed: %s => %s', key, value)
def _ack_received(self, bytes_acked, ow_delays, rtt_delays):
"""Parse the received delay sample(s)
        ow_delays are in milliseconds, rtt_delays are in seconds!
"""
# Update time of last ACK
self._last_ack_received = time.time()
# Process all received delay samples
for delay_sample in ow_delays:
self._update_base_delay(delay_sample)
self._update_current_delay(delay_sample)
# Update values
self._queuing_delay = self._filter_alg(self._current_delays) - min(self._base_delays)
off_target = (BaseLedbat.TARGET - self._queuing_delay) / BaseLedbat.TARGET
self._cwnd += int(BaseLedbat.GAIN * off_target * bytes_acked * BaseLedbat.MSS / self._cwnd)
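        # Worked example (illustrative): with TARGET = 50 ms and a filtered
        # queuing delay of 25 ms, off_target = (50 - 25) / 50 = 0.5, so cwnd
        # grows at half the maximum rate; a queuing delay above the target
        # makes off_target negative and shrinks cwnd.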
max_allowed_cwnd = self._flightsize + BaseLedbat.ALLOWED_INCREASE * BaseLedbat.MSS
self._cwnd = min([self._cwnd, max_allowed_cwnd])
self._cwnd = max([self._cwnd, BaseLedbat.MIN_CWND * BaseLedbat.MSS])
self._flightsize = max([0, self._flightsize - bytes_acked])
self._update_cto(rtt_delays)
def data_loss(self, will_retransmit=True, loss_size=None):
"""Reduce cwnd if data loss is experienced"""
# Get the current time
t_now = time.time()
if loss_size is None:
loss_size = BaseLedbat.MSS
# Prevent calling too often
if self._last_data_loss != 0:
if t_now - self._last_data_loss < self._rtt:
# At most once per RTT
return
# Save time when last dataloss event happened
self._last_data_loss = t_now
# Reduce the congestion window size
self._cwnd = min([
self._cwnd,
int(max([self._cwnd / 2, BaseLedbat.MIN_CWND * BaseLedbat.MSS]))
])
# Account for data in-flight
if not will_retransmit:
self._flightsize = self._flightsize - loss_size
def _no_ack_in_cto(self):
"""Update CWND if no ACK was received in CTO"""
self._cwnd = 1 * BaseLedbat.MSS
self._cto = 2 * self._cto
def _update_cto(self, rtt_values):
"""Calculate congestion timeout (CTO)"""
pass
def _filter_alg(self, filter_data):
"""Implements FILTER() algorithm"""
# Implemented per [RFC6817] MIN filter over a small window
# multiplied by -1 to get latest window_size values
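        # Worked example (illustrative): with BASE_HISTORY = 10,
        # window_size = -1 * ceil(10 / 4) = -3, so the minimum of the three
        # most recent samples is returned.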
window_size = -1 * math.ceil(self.BASE_HISTORY/4)
return min(filter_data[window_size:])
def _update_base_delay(self, delay):
"""Update value in base_delay tracker list"""
t_now = time.time()
# Implemented per [RFC6817]
minute_now = datetime.datetime.fromtimestamp(t_now).minute
minute_then = datetime.datetime.fromtimestamp(self._last_rollover).minute
if minute_now != minute_then:
# Shift value at next minute
self._last_rollover = t_now
self._base_delays = self._base_delays[1:]
self._base_delays.append(delay)
else:
# For each measurements during the same minute keep minimum value
# at the end of the list
self._base_delays[-1] = min([self._base_delays[-1], delay])
def _update_current_delay(self, delay):
"""Add new value to the current delays list"""
# Implemented per [RFC6817]
self._current_delays = self._current_delays[1:]
self._current_delays.append(delay)
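# A minimal sketch (not part of the original module) of how BaseLedbat might
# be extended to gate sending, as the module docstring suggests; the class
# name and method names below are hypothetical:
class _ExampleGatedLedbat(BaseLedbat):
    """Toy subclass that lets a caller ask whether data may be sent."""
    def can_send(self, num_bytes):
        # Permit sending only while the data in flight fits inside cwnd
        return self._flightsize + num_bytes <= self._cwnd
    def data_sent(self, num_bytes):
        # The caller reports transmitted bytes so flightsize stays accurate
        self._flightsize += num_bytes
    def ack_received(self, bytes_acked, ow_delays, rtt_delays):
        # Delegate the bookkeeping to the base class
        self._ack_received(bytes_acked, ow_delays, rtt_delays)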
|
apache-2.0
| 1,560,566,274,252,504,300
| 38.274854
| 99
| 0.599315
| false
| 3.877598
| false
| false
| false
|
xdlinux/xidian-scripts
|
Python/get_xidian_news.py
|
1
|
4476
|
# Copyright (C) 2020 by the XiDian Open Source Community.
#
# This file is part of xidian-scripts.
#
# xidian-scripts is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# xidian-scripts is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with xidian-scripts. If not, see <http://www.gnu.org/licenses/>.
'''
Crawl the front-page news of the Xidian news site
python3.x + requests + pyquery
'''
import requests as rq
from pyquery import PyQuery as pq
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'}
root_url = 'https://news.xidian.edu.cn/'
news_dic = []
def req(url):
    # Fetch a page and return a parsed PyQuery document
response = rq.get(url, headers=headers)
response.encoding = 'utf-8'
html = response.text
doc = pq(html)
return doc
def extract(doc):
    '''
    Extract article URLs from the front page.
    doc1 is the banner (headline news) section; below the banner there are three columns:
    doc2 is the first column, "work updates";
    doc3 is the second column, "campus-wide news";
    the third column, "hot news", is fetched through an API, so it has its own function.
    The other pages are not very interesting; parse() is generic, so write your own extractor if you want them.
    '''
urls = []
doc1 = doc('.content1_left')
doc1('.content1_left_top tbody tr:first-child').remove()
doc1('.content1_left_bottom_top').remove()
for url in doc1('a').items():
urls.append(url.attr.href)
doc2 = doc('.gzdt_bottom ul')
for url in doc2('li a:last-child').items():
urls.append(url.attr.href)
doc3 = doc('.mtxd_bottom')
for url in doc3('a').items():
        if url.attr.href[0] == 'i':
urls.append(url.attr.href)
dic4 = get_hot_news()
for dic in dic4:
urls.append(dic['linkurl'])
return urls
def parse(url):
    # Parse a single article page
doc = req(root_url + url)
doc('#wz_zw img').remove()
doc('#wz_zw span').remove()
tag = doc('.yaowen-a').text()
title = doc('.neirong-bt').text()
    date = doc('#date').text()[5:21] # raw text is "发布时间:2020-12-01 08:52:41"; adjust the slice as needed
source = doc('#from').text()
    author = doc('.editor').text() # raw text is "责任编辑:XXX" (editor in charge); adjust the slice as needed
    content = doc('#wz_zw p').text() # paragraphs are joined with spaces by default; write your own regex if you need newlines
    # URL of the first image in the article
if doc('.img_vsb_content').attr.src:
picurl = root_url[0:-1] + doc('.img_vsb_content').attr.src
else:
picurl = ''
news_dic.append(dict(zip(["tag", "title", "date", "author", "content", "picurl"],
[ tag , title , date , author, content , picurl ])))
def get_hot_news():
    # The "hot news" column is fetched through an API call, hence this separate function
data = {
'owner':'1271716923',
'treeid':'1001',
'viewid':'189460',
'mode':'10',
'locale':'zh_CN',
'pageUrl':'%2Findex.htm',
'uniqueId':'u38',
'actionmethod':'getnewslist'
}
json_raw = rq.post('https://news.xidian.edu.cn/system/resource/js/news/hotdynpullnews.jsp',data=data)
return eval(json_raw.text)
if __name__ == '__main__':
doc = req(root_url)
urls = extract(doc)
    # Be kind to the school's servers: slice urls when testing
for url in urls[25:30]:
parse(url)
print(news_dic)
'''
Example of the output format:
[{
'tag': '西电要闻',
'title': '西电举办第五届“三好三有”研究生导学团队评审会',
'date': '2020-11-30 09:38',
'author': ' 责任编辑:冯毓璇',
'content': '西电新闻网讯(通讯员 霍学浩 高宇星)11月27日下午,西安电子科技大学第五届“三好三有”研究 生导学团队评审会在北校区大礼堂举行...',
'picurl': 'https://news.xidian.edu.cn/__local/F/9A/57/DD2D65A251C04AE5C33ADA469B3_E66B88F8_4CA34.jpg'
}, {
'tag': '西电要闻',
'title': '师德标兵|秦枫:知行合一的“大先生”',
'date': '2020-12-01 10:26',
'author': '责任编辑:冯毓璇',
'content': ' ■学生记者 彭怡乐 宫懿伦 赵晨晋 顾启宇 自1992年任教以来,秦枫已在三尺讲台上辛勤耕耘了28年,她精进自身、知行合一、严谨治学...',
'picurl': 'https://news.xidian.edu.cn/__local/E/EC/25/D514D9A10754ADA29CCDB064439_93C52D97_7C020.jpg'
}]
'''
|
lgpl-3.0
| -4,134,377,156,185,125,400
| 24.333333
| 141
| 0.669441
| false
| 1.890355
| false
| false
| false
|
ashleywaite/django-more
|
django_enum/operations.py
|
1
|
17196
|
from enum import Enum
from operator import attrgetter
from django.db import models
from django.db.models import sql
from django.db.models.deletion import Collector
from django.utils import six
from django_types.operations import CustomTypeOperation
from .fields import EnumField
"""
Use a symbol = value style as per Enum expectations.
Where a value is the human readable or sensible value, and the symbol is the
constant or programming flag to use.
For readbility of the database values, the human readable values are used.
"""
class EnumState:
@classmethod
def values(cls):
return [em.value for em in cls]
@classmethod
def values_set(cls):
return set(cls.values())
def enum_state(values, name=None, app_label=None):
""" Create an EnumState representing the values or Enum """
if isinstance(values, type) and issubclass(values, Enum):
if not name:
name = values.__name__
values = (em.value for em in values)
elif not name:
name = 'Unnamed Enum'
e = Enum(name, [(v, v) for v in values], type=EnumState)
e.Meta = type('Meta', (object,), {})
e.Meta.app_label = app_label
return e
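# Hedged usage sketch (illustrative values, not part of the original module):
#   color_enum = enum_state(['red', 'green'], name='color', app_label='myapp')
#   color_enum.values()     -> ['red', 'green']
#   color_enum.values_set() -> {'red', 'green'}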
class SQLCollector(Collector):
""" Collector that generates the required deletion SQL instead of performing it """
def as_sql(self):
""" Generate SQL queries that perform related deletion """
# List of (sql, params) tuples to perform deletion
query_list = []
for model, instances in self.data.items():
self.data[model] = sorted(instances, key=attrgetter("pk"))
self.sort()
# Do not send pre_delete signals as in .delete()
# Fast deletes
for qs in self.fast_deletes:
# TODO Check for any potential caveats from complex queries - assume none are generated by Collector
# Clone queryset into DeleteQuery to use .as_sql()
query_list.append(qs.query.clone(klass=sql.DeleteQuery).get_compiler(self.using).as_sql())
# update fields
for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
query = sql.UpdateQuery(model)
for (field, value), instances in six.iteritems(instances_for_fieldvalues):
query.add_update_values({field.name: value})
query.add_q(models.Q(pk__in=[obj.pk for obj in instances]))
query_list.append(query.get_compiler(using=self.using).as_sql())
# reverse instance collections
for instances in six.itervalues(self.data):
instances.reverse()
# delete instances
for model, instances in six.iteritems(self.data):
query = sql.DeleteQuery(model)
pk_list = [obj.pk for obj in instances]
query.where = query.where_class()
query.add_q(models.Q(pk__in=pk_list))
query_list.append(query.get_compiler(using=self.using).as_sql())
# Do not update instances as in .delete()
return query_list
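# Hedged usage sketch (illustrative; SomeModel and ids are hypothetical).
# Collector.collect() is the stock Django entry point for gathering the
# deletion cascade, after which as_sql() yields the statements instead of
# executing them:
#   collector = SQLCollector(using='default')
#   collector.collect(list(SomeModel.objects.filter(pk__in=ids)))
#   for sql, params in collector.as_sql():
#       schema_editor.execute(sql, params)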
class EnumOperation(CustomTypeOperation):
field_type = EnumField
class CreateEnum(EnumOperation):
def __init__(self, db_type, values):
# Values follow Enum functional API options to specify
self.db_type = db_type
self.values = values
def describe(self):
return 'Create enum type {db_type}'.format(db_type=self.db_type)
def state_forwards(self, app_label, state):
enum = enum_state(self.values, name=self.db_type, app_label=app_label)
state.add_type(self.db_type, enum)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.features.requires_enum_declaration:
enum = to_state.db_types[self.db_type]
sql = schema_editor.sql_create_enum % {
'enum_type': self.db_type,
'values': ', '.join(['%s'] * len(enum))}
schema_editor.execute(sql, enum.values())
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.features.requires_enum_declaration:
sql = schema_editor.sql_delete_enum % {
'enum_type': self.db_type}
schema_editor.execute(sql)
class RemoveEnum(EnumOperation):
def __init__(self, db_type):
self.db_type = db_type
def describe(self):
return 'Remove enum type {db_type}'.format(db_type=self.db_type)
def state_forwards(self, app_label, state):
# TODO Add dependency checking and cascades
state.remove_type(self.db_type)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.features.requires_enum_declaration:
sql = schema_editor.sql_delete_enum % {
'enum_type': self.db_type}
schema_editor.execute(sql)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.features.requires_enum_declaration:
enum = to_state.db_types[self.db_type]
sql = schema_editor.sql_create_enum % {
'enum_type': self.db_type,
'values': ', '.join(['%s'] * len(enum))}
schema_editor.execute(sql, enum.values())
class RenameEnum(EnumOperation):
def __init__(self, old_type, new_type):
self.old_db_type = old_type
self.db_type = new_type
def describe(self):
return 'Rename enum type {old} to {new}'.format(
old=self.old_db_type,
new=self.db_type)
def state_forwards(self, app_label, state):
old_enum = state.db_types[self.old_db_type]
enum = enum_state(old_enum, name=self.db_type, app_label=app_label)
state.remove_type(self.old_db_type)
state.add_type(self.db_type, enum)
# Update all fields using this enum
for info in self.get_fields(state, db_type=self.old_db_type):
changed_field = info.field.clone()
changed_field.type_name = self.db_type
info.model_state.fields[info.field_index] = (info.field_name, changed_field)
state.reload_model(info.model_app_label, info.model_name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.features.requires_enum_declaration:
sql = schema_editor.sql_rename_enum % {
'old_type': self.old_db_type,
'enum_type': self.db_type}
schema_editor.execute(sql)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.old_db_type, self.db_type = self.db_type, self.old_db_type
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.old_db_type, self.db_type = self.db_type, self.old_db_type
class AlterEnum(EnumOperation):
temp_db_type = 'django_enum_temp'
transition_db_type = 'django_enum_transition'
def __init__(self, db_type, add_values=None, remove_values=None, on_delete=models.PROTECT):
self.db_type = db_type
self.add_values = set(add_values or ())
self.remove_values = set(remove_values or ())
self.on_delete = on_delete
def describe(self):
return 'Alter enum type {db_type},{added}{removed}'.format(
db_type=self.db_type,
added=' added {} value(s)'.format(len(self.add_values)) if self.add_values else '',
removed=' removed {} value(s)'.format(len(self.remove_values)) if self.remove_values else '')
def state_forwards(self, app_label, state):
from_enum = state.db_types[self.db_type]
to_enum = enum_state((from_enum.values_set() | self.add_values) - self.remove_values, name=self.db_type, app_label=app_label)
state.add_type(self.db_type, to_enum)
# Update all fields using this enum
for info in self.get_fields(state):
changed_field = info.field.clone()
changed_field.type_def = to_enum
info.model_state.fields[info.field_index] = (info.field_name, changed_field)
state.reload_model(info.model_app_label, info.model_name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# Compare from_state and to_state and generate the appropriate ALTER commands
pre_actions = []
post_actions = []
# Make sure ORM is ready for use
from_state.clear_delayed_apps_cache()
db_alias = schema_editor.connection.alias
# Get field/model list
fields = [
(from_model, to_model, from_field, self.on_delete or from_field.on_delete)
for info in self.get_fields(from_state)
for from_model in [from_state.apps.get_model(info.model_app_label, info.model_name)]
for from_field in [from_model._meta.get_field(info.field_name)]
for to_model in [to_state.apps.get_model(info.model_app_label, info.model_name)]
]
if self.remove_values:
# The first post delete actions are to finalise the field types
if schema_editor.connection.features.has_enum:
if schema_editor.connection.features.requires_enum_declaration:
sql_alter_column_type = getattr(
schema_editor,
'sql_alter_column_type_using',
schema_editor.sql_alter_column_type)
for (from_model, to_model, field, on_delete) in fields:
db_table = schema_editor.quote_name(from_model._meta.db_table)
db_field = schema_editor.quote_name(field.column)
sql = schema_editor.sql_alter_column % {
'table': db_table,
'changes': sql_alter_column_type % {
'column': db_field,
'type': self.temp_db_type,
'old_type': self.db_type}}
post_actions.append((sql, []))
else:
for (from_model, to_model, field, on_delete) in fields:
db_table = schema_editor.quote_name(from_model._meta.db_table)
db_field = schema_editor.quote_name(field.column)
new_field = to_model._meta.get_field(field.name)
db_type, params = new_field.db_type(schema_editor.connection).paramatized
sql = schema_editor.sql_alter_column % {
'table': db_table,
'changes': schema_editor.sql_alter_column_type % {
'column': db_field,
'type': db_type}}
post_actions.append((sql, params))
if self.add_values:
# If there's the possibility of inconsistent actions, use transition type
# ie, ADD VALUE 'new_val' and REMOVE VALUE 'rem_val' ON DELETE SET('new_val')
                # On DBs without enum support this isn't necessary as they are always CHAR
transition_fields = [
(from_model, field)
for (from_model, to_model, field, on_delete) in fields
if hasattr(on_delete, 'deconstruct')
or (on_delete == models.SET_DEFAULT and field.get_default() in self.add_values)]
if transition_fields and schema_editor.connection.features.has_enum:
transition_values = to_state.db_types[self.db_type].values_set() | self.remove_values
transition_enum = enum_state(transition_values, 'transitional_enum')
if schema_editor.connection.features.requires_enum_declaration:
# Create transition type
sql = schema_editor.sql_create_enum % {
'enum_type': self.transition_db_type,
                        'values': ', '.join(['%s'] * len(transition_values))}
pre_actions.append((sql, list(transition_values)))
# Drop transition type after done
sql = schema_editor.sql_delete_enum % {
'enum_type': self.transition_db_type}
post_actions.append((sql, []))
# Set fields to transition type
for (model, field) in transition_fields:
db_table = schema_editor.quote_name(model._meta.db_table)
db_field = schema_editor.quote_name(field.column)
field.type_name = self.transition_db_type
field.type_def = transition_enum
db_type, params = field.db_type(schema_editor.connection).paramatized
sql = schema_editor.sql_alter_column % {
'table': db_table,
'changes': schema_editor.sql_alter_column_type % {
'column': db_field,
'type': db_type}}
pre_actions.append((sql, params))
if schema_editor.connection.features.requires_enum_declaration:
# Create new type with temporary name
to_enum = to_state.db_types[self.db_type]
sql = schema_editor.sql_create_enum % {
'enum_type': self.temp_db_type,
'values': ', '.join(['%s'] * len(to_enum))}
pre_actions.append((sql, to_enum.values()))
# Clean up original type and rename new one to replace it
sql = schema_editor.sql_delete_enum % {
'enum_type': self.db_type}
post_actions.append((sql, []))
sql = schema_editor.sql_rename_enum % {
'old_type': self.temp_db_type,
'enum_type': self.db_type}
post_actions.append((sql, []))
elif self.add_values:
# Just adding values? Directly modify types, no hassle!
if schema_editor.connection.features.requires_enum_declaration:
for value in self.add_values:
sql = schema_editor.sql_alter_enum % {
'enum_type': self.db_type,
'value': '%s'}
post_actions.append((sql, [value]))
elif schema_editor.connection.features.has_enum:
for (from_model, to_model, field, on_delete) in fields:
db_table = schema_editor.quote_name(from_model._meta.db_table)
db_field = schema_editor.quote_name(field.column)
new_field = to_model._meta.get_field(field.name)
db_type, params = new_field.db_type(schema_editor.connection).paramatized
                    sql = schema_editor.sql_alter_column % {
'table': db_table,
'changes': schema_editor.sql_alter_column_type % {
'column': db_field,
'type': db_type}}
post_actions.append((sql, params))
# Prepare database for data to be migrated
for sql, params in pre_actions:
schema_editor.execute(sql, params)
# Apply all on_delete actions making data consistent with to_state values
if self.remove_values:
# Cheap hack to allow on_delete to work
for (from_model, to_model, field, on_delete) in fields:
field.remote_field = self
# Records affected by on_delete action
on_delete_gen = ((
field,
from_model.objects.using(db_alias).filter(
models.Q(('{}__in'.format(field.name), self.remove_values))
).only('pk'),
on_delete)
for (from_model, to_model, field, on_delete) in fields)
# Validate on_delete constraints
collector = SQLCollector(using=db_alias)
for (field, qs, on_delete) in on_delete_gen:
if qs:
# Trigger the on_delete collection directly
on_delete(collector, field, qs, db_alias)
for sql, params in collector.as_sql():
# Use SQLCollector.as_sql() instead of directly executing
# Such that manage.py sqlmigration correctly reflects all actions
schema_editor.execute(sql, params)
# Apply final changes
for sql, params in post_actions:
schema_editor.execute(sql, params)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.add_values, self.remove_values = self.remove_values, self.add_values
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.add_values, self.remove_values = self.remove_values, self.add_values
|
bsd-3-clause
| 4,471,117,172,481,644,500
| 44.734043
| 133
| 0.573447
| false
| 4.042313
| false
| false
| false
|
fukun07/neural-image-captioning
|
codes/pycoco/bleu/bleu.py
|
1
|
1260
|
#!/usr/bin/env python
#
# File Name : bleu.py
#
# Description : Wrapper for BLEU scorer.
#
# Creation Date : 06-01-2015
# Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT
# Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu>
from bleu_scorer import BleuScorer
class Bleu:
def __init__(self, n=4):
        # By default, compute BLEU score up to n=4
self._n = n
self._hypo_for_image = {}
self.ref_for_image = {}
def compute_score(self, gts, res):
assert(sorted(gts.keys()) == sorted(res.keys()))
imgIds = gts.keys()
bleu_scorer = BleuScorer(n=self._n)
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) > 1)
bleu_scorer += (hypo[0], ref)
#score, scores = bleu_scorer.compute_score(option='shortest')
score, scores = bleu_scorer.compute_score(option='closest', verbose=0)
#score, scores = bleu_scorer.compute_score(option='average', verbose=0)
# return (bleu, bleu_info)
return score, scores
def method(self):
return "Bleu"
|
mit
| -4,045,538,868,026,867,700
| 26.391304
| 79
| 0.569841
| false
| 3.165829
| false
| false
| false
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/catalog/CatalogChatItem.py
|
1
|
5689
|
from panda3d.core import *
from panda3d.direct import *
import CatalogItem
from toontown.toonbase import ToontownGlobals
from otp.otpbase import OTPLocalizer
from toontown.toonbase import TTLocalizer
bannedPhrases = [11009]
class CatalogChatItem(CatalogItem.CatalogItem):
def makeNewItem(self, customIndex):
self.customIndex = customIndex
CatalogItem.CatalogItem.makeNewItem(self)
def getPurchaseLimit(self):
return 1
def reachedPurchaseLimit(self, avatar):
if self in avatar.onOrder or self in avatar.mailboxContents or self in avatar.onGiftOrder or self in avatar.awardMailboxContents or self in avatar.onAwardOrder:
return 1
return avatar.customMessages.count(self.customIndex) != 0
def getTypeName(self):
return TTLocalizer.ChatTypeName
def getName(self):
return TTLocalizer.ChatItemQuotes % OTPLocalizer.CustomSCStrings[self.customIndex]
def getDisplayName(self):
return OTPLocalizer.CustomSCStrings[self.customIndex]
def recordPurchase(self, avatar, optional):
if avatar.customMessages.count(self.customIndex) != 0:
return ToontownGlobals.P_ReachedPurchaseLimit
if len(avatar.customMessages) >= ToontownGlobals.MaxCustomMessages:
if optional >= 0 and optional < len(avatar.customMessages):
del avatar.customMessages[optional]
if len(avatar.customMessages) >= ToontownGlobals.MaxCustomMessages:
return ToontownGlobals.P_NoRoomForItem
avatar.customMessages.append(self.customIndex)
avatar.d_setCustomMessages(avatar.customMessages)
return ToontownGlobals.P_ItemAvailable
def getAcceptItemErrorText(self, retcode):
if retcode == ToontownGlobals.P_ItemAvailable:
return TTLocalizer.CatalogAcceptChat
return CatalogItem.CatalogItem.getAcceptItemErrorText(self, retcode)
def output(self, store = -1):
return 'CatalogChatItem(%s%s)' % (self.customIndex, self.formatOptionalData(store))
def compareTo(self, other):
return self.customIndex - other.customIndex
def getHashContents(self):
return self.customIndex
def getBasePrice(self):
if self.customIndex >= 10000:
return 150
return 100
def decodeDatagram(self, di, versionNumber, store):
CatalogItem.CatalogItem.decodeDatagram(self, di, versionNumber, store)
self.customIndex = di.getUint16()
text = OTPLocalizer.CustomSCStrings[self.customIndex]
def encodeDatagram(self, dg, store):
CatalogItem.CatalogItem.encodeDatagram(self, dg, store)
dg.addUint16(self.customIndex)
def acceptItem(self, mailbox, index, callback):
if len(base.localAvatar.customMessages) < ToontownGlobals.MaxCustomMessages:
mailbox.acceptItem(self, index, callback)
else:
self.showMessagePickerOnAccept(mailbox, index, callback)
def requestPurchase(self, phone, callback):
if len(base.localAvatar.customMessages) < ToontownGlobals.MaxCustomMessages:
CatalogItem.CatalogItem.requestPurchase(self, phone, callback)
else:
self.showMessagePicker(phone, callback)
def showMessagePicker(self, phone, callback):
self.phone = phone
self.callback = callback
import CatalogChatItemPicker
self.messagePicker = CatalogChatItemPicker.CatalogChatItemPicker(self.__handlePickerDone, self.customIndex)
self.messagePicker.show()
def showMessagePickerOnAccept(self, mailbox, index, callback):
self.mailbox = mailbox
self.callback = callback
self.index = index
import CatalogChatItemPicker
self.messagePicker = CatalogChatItemPicker.CatalogChatItemPicker(self.__handlePickerOnAccept, self.customIndex)
self.messagePicker.show()
def __handlePickerOnAccept(self, status, pickedMessage = None):
        print 'Picker Status %s' % status
if status == 'pick':
self.mailbox.acceptItem(self, self.index, self.callback, pickedMessage)
else:
print 'picker canceled'
self.callback(ToontownGlobals.P_UserCancelled, None, self.index)
self.messagePicker.hide()
self.messagePicker.destroy()
del self.messagePicker
del self.callback
del self.mailbox
return
def __handlePickerDone(self, status, pickedMessage = None):
if status == 'pick':
CatalogItem.CatalogItem.requestPurchase(self, self.phone, self.callback, pickedMessage)
self.messagePicker.hide()
self.messagePicker.destroy()
del self.messagePicker
del self.callback
del self.phone
def getPicture(self, avatar):
chatBalloon = loader.loadModel('phase_3/models/props/chatbox')
chatBalloon.find('**/top').setPos(1, 0, 5)
chatBalloon.find('**/middle').setScale(1, 1, 3)
frame = self.makeFrame()
chatBalloon.reparentTo(frame)
chatBalloon.setPos(-2.19, 0, -1.74)
chatBalloon.setScale(0.4)
self.hasPicture = True
return (frame, None)
def getChatRange(fromIndex, toIndex, *otherRanges):
    result = []
froms = [fromIndex]
tos = [toIndex]
i = 0
while i < len(otherRanges):
froms.append(otherRanges[i])
tos.append(otherRanges[i + 1])
i += 2
for chatId in OTPLocalizer.CustomSCStrings.keys():
for fromIndex, toIndex in zip(froms, tos):
if chatId >= fromIndex and chatId <= toIndex and chatId not in bannedPhrases:
                result.append(CatalogChatItem(chatId))
    return result
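# Hedged usage sketch (illustrative id ranges):
#   items = getChatRange(2000, 2004, 3110, 3500)
# returns a CatalogChatItem for every custom chat id defined in
# OTPLocalizer.CustomSCStrings that falls in [2000, 2004] or [3110, 3500]
# and is not listed in bannedPhrases.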
|
apache-2.0
| 6,819,379,785,983,695,000
| 37.181208
| 168
| 0.683951
| false
| 3.956189
| false
| false
| false
|
Audaces/ml
|
src/po2ml/__main__.py
|
1
|
3322
|
#!/usr/bin/env python3
'''{executable_name}: Importer for .po files to Audaces ML's new translation format.
Usage:
{executable_name} [--mark <mark>] [<file>]
This program imports .po (gettext translation) files to Audaces ML .tra format.
If a custom mark is set through --mark, it wil be placed on both ends of the
identifier as the default translation string for untranslated strings in the
.po file.
If no file is passed as an argument, the program reads from stdin.
Output is written to stdout.
'''
import sys
import re
from ml import tra_file
sys.stdout = open(1, 'w', encoding='utf-8', newline='\n', closefd=False)
def strip_uninteresting(file_handler):
'''Removes irrelevant lines, or features the importer can't handle.
This function takes a (presumably text, we hope) file and returns
a list of strings containing only the strings deemed 'interesting',
that is, the strings that start with "msgid", "msgstr" or "msgstr[0]",
in THE SAME ORDER as they were in the file (really important).
'''
return [line.strip() for line in file_handler if line.startswith("msgid ")
or line.startswith("msgstr ")
or line.startswith("msgstr[0] ")
or line.startswith('"')]
def concatenate_strings(input):
'''Concatenates every string in an input describing strings.
This function takes as its input a string containing a sequence of
strings, delimited by '"'.
'''
strings = re.findall(r'"((?:\\"|.)*?)"', input)
return ''.join(strings)
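# Illustrative example (not part of the original module):
#   concatenate_strings('msgid "Hello, " "world!"') == 'Hello, world!'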
def make_tuples(lines):
'''Actual parsing of the po file.
This function takes a list of lines in the format returned by
strip_uninteresting (check its docstring if needed) and pairs them up in
a (msgid, msgstr) manner. This creates an output similar to the one used
in the ml module.
The input to the function is assumed to be correct already, as in no
unpaired or out of order items are given.
'''
joined = ' '.join(lines)
pieces = re.split(r'\s*msg(?:id|str)\s*', joined.strip())
strings = [concatenate_strings(string) for string in pieces if string]
result = []
while strings:
msgid, msgstr, *strings = strings
if msgid:
result.append((msgid, msgstr))
return result
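# Illustrative example (not part of the original module):
#   make_tuples(['msgid "Hello"', 'msgstr "Bonjour"']) == [('Hello', 'Bonjour')]
# Entries with an empty msgid (such as the .po header) are dropped.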
def parse_file(file_handler):
'''Combines removal of uninteresting lines and the actual parsing.
This function merely applies any marks needed to the output of make_tuples
applied in a given file. In case of need, check their docstrings for an
in-depth view of what they do.
'''
return (make_tuples(strip_uninteresting(file_handler)))
def main():
'''Main logic for the importer.
Main function for this program. It parses arguments looking for the
definition of a custom mark, and applies parse_file() to the given input
(file or stdin).
'''
args = sys.argv[1:]
if len(args) > 1:
print(__doc__.format(executable_name=sys.argv[0]))
sys.exit(-1)
if args:
filename = args[0]
else:
filename = 0
try:
with open(filename, encoding='utf-8') as file_handler:
print(tra_file(parse_file(file_handler)))
except FileNotFoundError:
print(filename, 'is not a valid file.')
if __name__ == '__main__':
main()
|
mit
| 3,981,279,307,848,930,000
| 29.759259
| 82
| 0.664359
| false
| 3.945368
| false
| false
| false
|
looker/sentry
|
src/sentry/models/file.py
|
1
|
14550
|
"""
sentry.models.file
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import six
import mmap
import tempfile
from hashlib import sha1
from uuid import uuid4
from concurrent.futures import ThreadPoolExecutor
from django.conf import settings
from django.core.files.base import File as FileObj
from django.core.files.base import ContentFile
from django.core.files.storage import get_storage_class
from django.db import models, transaction
from django.utils import timezone
from jsonfield import JSONField
from sentry.app import locks
from sentry.db.models import (BoundedPositiveIntegerField, FlexibleForeignKey, Model)
from sentry.tasks.files import delete_file as delete_file_task
from sentry.utils import metrics
from sentry.utils.retries import TimedRetryPolicy
ONE_DAY = 60 * 60 * 24
DEFAULT_BLOB_SIZE = 1024 * 1024 # one mb
CHUNK_STATE_HEADER = '__state'
def enum(**named_values):
return type('Enum', (), named_values)
ChunkFileState = enum(
OK='ok', # File in database
NOT_FOUND='not_found', # File not found in database
CREATED='created', # File was created in the request and send to the worker for assembling
ASSEMBLING='assembling', # File still being processed by worker
ERROR='error' # Error happened during assembling
)
class AssembleChecksumMismatch(Exception):
pass
def get_storage():
from sentry import options
backend = options.get('filestore.backend')
options = options.get('filestore.options')
try:
backend = settings.SENTRY_FILESTORE_ALIASES[backend]
except KeyError:
pass
storage = get_storage_class(backend)
return storage(**options)
class FileBlob(Model):
__core__ = False
path = models.TextField(null=True)
size = BoundedPositiveIntegerField(null=True)
checksum = models.CharField(max_length=40, unique=True)
timestamp = models.DateTimeField(default=timezone.now, db_index=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_fileblob'
@classmethod
def from_file(cls, fileobj):
"""
        Retrieve a FileBlob instance for the given file.
        If not already present, this will cause it to be stored.
        >>> blob = FileBlob.from_file(fileobj)
"""
size = 0
checksum = sha1(b'')
for chunk in fileobj:
size += len(chunk)
checksum.update(chunk)
checksum = checksum.hexdigest()
# TODO(dcramer): the database here is safe, but if this lock expires
# and duplicate files are uploaded then we need to prune one
lock = locks.get('fileblob:upload:{}'.format(checksum), duration=60 * 10)
with TimedRetryPolicy(60)(lock.acquire):
# test for presence
try:
existing = FileBlob.objects.get(checksum=checksum)
except FileBlob.DoesNotExist:
pass
else:
return existing
blob = cls(
size=size,
checksum=checksum,
)
blob.path = cls.generate_unique_path(blob.timestamp)
storage = get_storage()
storage.save(blob.path, fileobj)
blob.save()
metrics.timing('filestore.blob-size', size)
return blob
@classmethod
def generate_unique_path(cls, timestamp):
pieces = [six.text_type(x) for x in divmod(int(timestamp.strftime('%s')), ONE_DAY)]
pieces.append(uuid4().hex)
return u'/'.join(pieces)
def delete(self, *args, **kwargs):
lock = locks.get('fileblob:upload:{}'.format(self.checksum), duration=60 * 10)
with TimedRetryPolicy(60)(lock.acquire):
if self.path:
self.deletefile(commit=False)
super(FileBlob, self).delete(*args, **kwargs)
def deletefile(self, commit=False):
assert self.path
delete_file_task.delay(self.path, self.checksum)
self.path = None
if commit:
self.save()
def getfile(self):
"""
Return a file-like object for this File's content.
>>> with blob.getfile() as src, open('/tmp/localfile', 'wb') as dst:
>>> for chunk in src.chunks():
>>> dst.write(chunk)
"""
assert self.path
storage = get_storage()
return storage.open(self.path)
class File(Model):
__core__ = False
name = models.TextField()
type = models.CharField(max_length=64)
timestamp = models.DateTimeField(default=timezone.now, db_index=True)
headers = JSONField()
blobs = models.ManyToManyField('sentry.FileBlob', through='sentry.FileBlobIndex')
size = BoundedPositiveIntegerField(null=True)
checksum = models.CharField(max_length=40, null=True, db_index=True)
# <Legacy fields>
# Remove in 8.1
blob = FlexibleForeignKey('sentry.FileBlob', null=True, related_name='legacy_blob')
path = models.TextField(null=True)
# </Legacy fields>
class Meta:
app_label = 'sentry'
db_table = 'sentry_file'
def _get_chunked_blob(self, mode=None, prefetch=False,
prefetch_to=None, delete=True):
return ChunkedFileBlobIndexWrapper(
FileBlobIndex.objects.filter(
file=self,
).select_related('blob').order_by('offset'),
mode=mode,
prefetch=prefetch,
prefetch_to=prefetch_to,
delete=delete
)
def getfile(self, mode=None, prefetch=False, as_tempfile=False):
"""Returns a file object. By default the file is fetched on
demand but if prefetch is enabled the file is fully prefetched
into a tempfile before reading can happen.
        Additionally, if `as_tempfile` is passed, a NamedTemporaryFile is
        returned instead, which can help in certain situations where a
        tempfile is necessary.
"""
if as_tempfile:
prefetch = True
impl = self._get_chunked_blob(mode, prefetch)
if as_tempfile:
return impl.detach_tempfile()
return FileObj(impl, self.name)
def save_to(self, path):
"""Fetches the file and emplaces it at a certain location. The
write is done atomically to a tempfile first and then moved over.
If the directory does not exist it is created.
"""
path = os.path.abspath(path)
base = os.path.dirname(path)
try:
os.makedirs(base)
except OSError:
pass
f = None
try:
f = self._get_chunked_blob(prefetch=True,
prefetch_to=base,
delete=False).detach_tempfile()
os.rename(f.name, path)
f.close()
f = None
finally:
if f is not None:
f.close()
try:
os.remove(f.name)
except Exception:
pass
def putfile(self, fileobj, blob_size=DEFAULT_BLOB_SIZE, commit=True):
"""
Save a fileobj into a number of chunks.
Returns a list of `FileBlobIndex` items.
>>> indexes = file.putfile(fileobj)
"""
results = []
offset = 0
checksum = sha1(b'')
while True:
contents = fileobj.read(blob_size)
if not contents:
break
checksum.update(contents)
blob_fileobj = ContentFile(contents)
blob = FileBlob.from_file(blob_fileobj)
results.append(FileBlobIndex.objects.create(
file=self,
blob=blob,
offset=offset,
))
offset += blob.size
self.size = offset
self.checksum = checksum.hexdigest()
metrics.timing('filestore.file-size', offset)
if commit:
self.save()
return results
def assemble_from_file_blob_ids(self, file_blob_ids, checksum, commit=True):
"""
        This creates a file from file blobs and returns a temp file with the
contents.
"""
tf = tempfile.NamedTemporaryFile()
with transaction.atomic():
file_blobs = FileBlob.objects.filter(id__in=file_blob_ids).all()
# Make sure the blobs are sorted with the order provided
file_blobs = sorted(file_blobs, key=lambda blob: file_blob_ids.index(blob.id))
new_checksum = sha1(b'')
offset = 0
for blob in file_blobs:
FileBlobIndex.objects.create(
file=self,
blob=blob,
offset=offset,
)
for chunk in blob.getfile().chunks():
new_checksum.update(chunk)
tf.write(chunk)
offset += blob.size
self.size = offset
self.checksum = new_checksum.hexdigest()
if checksum != self.checksum:
raise AssembleChecksumMismatch('Checksum mismatch')
metrics.timing('filestore.file-size', offset)
if commit:
self.save()
tf.flush()
tf.seek(0)
return tf
class FileBlobIndex(Model):
__core__ = False
file = FlexibleForeignKey('sentry.File')
blob = FlexibleForeignKey('sentry.FileBlob')
offset = BoundedPositiveIntegerField()
class Meta:
app_label = 'sentry'
db_table = 'sentry_fileblobindex'
unique_together = (('file', 'blob', 'offset'), )
class ChunkedFileBlobIndexWrapper(object):
def __init__(self, indexes, mode=None, prefetch=False,
prefetch_to=None, delete=True):
        # eager load from the database in case it's a queryset
self._indexes = list(indexes)
self._curfile = None
self._curidx = None
if prefetch:
self.prefetched = True
self._prefetch(prefetch_to, delete)
else:
self.prefetched = False
self.mode = mode
self.open()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def detach_tempfile(self):
if not self.prefetched:
            raise TypeError('Can only detach tempfiles in prefetch mode')
rv = self._curfile
self._curfile = None
self.close()
rv.seek(0)
return rv
def _nextidx(self):
assert not self.prefetched, 'this makes no sense'
old_file = self._curfile
try:
try:
self._curidx = six.next(self._idxiter)
self._curfile = self._curidx.blob.getfile()
except StopIteration:
self._curidx = None
self._curfile = None
finally:
if old_file is not None:
old_file.close()
@property
def size(self):
return sum(i.blob.size for i in self._indexes)
def open(self):
self.closed = False
self.seek(0)
def _prefetch(self, prefetch_to=None, delete=True):
size = self.size
f = tempfile.NamedTemporaryFile(prefix='._prefetch-',
dir=prefetch_to,
delete=delete)
if size == 0:
self._curfile = f
return
# Zero out the file
f.seek(size - 1)
f.write('\x00')
f.flush()
mem = mmap.mmap(f.fileno(), size)
def fetch_file(offset, getfile):
with getfile() as sf:
while True:
chunk = sf.read(65535)
if not chunk:
break
mem[offset:offset + len(chunk)] = chunk
offset += len(chunk)
with ThreadPoolExecutor(max_workers=4) as exe:
for idx in self._indexes:
exe.submit(fetch_file, idx.offset, idx.blob.getfile)
mem.flush()
self._curfile = f
def close(self):
if self._curfile:
self._curfile.close()
self._curfile = None
self._curidx = None
self.closed = True
def seek(self, pos):
if self.closed:
raise ValueError('I/O operation on closed file')
if self.prefetched:
return self._curfile.seek(pos)
if pos < 0:
raise IOError('Invalid argument')
for n, idx in enumerate(self._indexes[::-1]):
if idx.offset <= pos:
if idx != self._curidx:
self._idxiter = iter(self._indexes[-(n + 1):])
self._nextidx()
break
else:
raise ValueError('Cannot seek to pos')
self._curfile.seek(pos - self._curidx.offset)
def tell(self):
if self.closed:
raise ValueError('I/O operation on closed file')
if self.prefetched:
return self._curfile.tell()
if self._curfile is None:
return self.size
return self._curidx.offset + self._curfile.tell()
def read(self, n=-1):
if self.closed:
raise ValueError('I/O operation on closed file')
if self.prefetched:
return self._curfile.read(n)
result = bytearray()
# Read to the end of the file
if n < 0:
while self._curfile is not None:
blob_result = self._curfile.read(32768)
if not blob_result:
self._nextidx()
else:
result.extend(blob_result)
# Read until a certain number of bytes are read
else:
while n > 0 and self._curfile is not None:
blob_result = self._curfile.read(min(n, 32768))
if not blob_result:
self._nextidx()
else:
n -= len(blob_result)
result.extend(blob_result)
return bytes(result)
class FileBlobOwner(Model):
__core__ = False
blob = FlexibleForeignKey('sentry.FileBlob')
organization = FlexibleForeignKey('sentry.Organization')
class Meta:
app_label = 'sentry'
db_table = 'sentry_fileblobowner'
unique_together = (('blob', 'organization'), )
|
bsd-3-clause
| -434,740,297,523,743,400
| 29
| 95
| 0.564948
| false
| 4.281931
| false
| false
| false
|
jopohl/urh
|
src/urh/plugins/ZeroHide/ZeroHideAction.py
|
1
|
2253
|
from PyQt5.QtWidgets import QUndoCommand
from urh.signalprocessing.ProtocolAnalyzer import ProtocolAnalyzer
class ZeroHideAction(QUndoCommand):
def __init__(self, protocol: ProtocolAnalyzer, following_zeros: int, view: int, zero_hide_offsets: dict):
super().__init__()
self.protocol = protocol
self.following_zeros = following_zeros
self.viewtype = view
self.setText("Hide zero sequences >= " + str(self.following_zeros))
self.zero_hide_offsets = zero_hide_offsets
def redo(self):
factor = 1
if self.viewtype == 1:
factor = 4
elif self.viewtype == 2:
factor = 8
pa = self.protocol
self.zero_hide_offsets.clear()
for i in range(pa.num_messages):
message = pa.messages[i]
if self.viewtype == 0:
data = message.decoded_bits_str
elif self.viewtype == 1:
data = message.decoded_hex_str
else:
data = message.decoded_ascii_str
zero_sequences = self.__get_zero_seq_indexes(data, self.following_zeros)
self.zero_hide_offsets[i] = {start: end-start for start, end in zero_sequences}
for seq in reversed(zero_sequences):
full_bits = pa.messages[i].decoded_bits
start = seq[0] * factor
end = seq[1] * factor
pa.messages[i].decoded_bits = full_bits[:start] + full_bits[end:]
def undo(self):
self.zero_hide_offsets.clear()
self.protocol.clear_decoded_bits()
def __get_zero_seq_indexes(self, message: str, following_zeros: int):
"""
:rtype: list[tuple of int]
"""
result = []
if following_zeros > len(message):
return result
zero_counter = 0
for i in range(0, len(message)):
if message[i] == "0":
zero_counter += 1
else:
if zero_counter >= following_zeros:
result.append((i-zero_counter, i))
zero_counter = 0
if zero_counter >= following_zeros:
result.append((len(message) - zero_counter, len(message)))
return result
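    # Illustrative example (not part of the original module): with
    # following_zeros=3, __get_zero_seq_indexes("110000011", 3) returns
    # [(2, 7)], i.e. the run of five zeros spanning indexes 2..6.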
|
gpl-3.0
| 585,459,217,494,885,800
| 31.666667
| 109
| 0.556147
| false
| 4.126374
| false
| false
| false
|
WilsonKiggundu/HospitalManagement_InformationSystem
|
hospitalmanager/app/migrations/0001_initial.py
|
1
|
1041
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-16 16:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField(blank=True, max_length=500)),
('is_active', models.BooleanField(default=True)),
('avatar', models.ImageField(blank=True, null=True, upload_to='')),
('birth_date', models.DateField(blank=True, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
gpl-3.0
| 8,859,425,243,024,919,000
| 33.7
| 121
| 0.614793
| false
| 4.114625
| false
| false
| false
|
aminotti/yameo
|
lib/httpmethod.py
|
1
|
11707
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-2015 Anthony Minotti <anthony@minotti.cool>.
#
#
# This file is part of Yameo framework.
#
# Yameo framework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Yameo framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yameo framework. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
from flask import Response
from flask import request
from .exceptions import *
from lib import contenttype
from lib.orm.binary import Binary
from lib.orm.fields import BinaryField
# from lib.orm import ORMFilter, BinaryField, SQLRelationship, one2many
class HTTPMethods(object):
@classmethod
def dispatchMethods(cls, domain=None, relationship=None):
if domain:
cls._checkDomains(domain)
if request.method == 'GET':
return cls._getHTTP(domain, relationship)
elif request.method == 'PUT':
return cls._putHTTP(domain)
elif request.method == 'PATCH':
return cls._patchHTTP(domain)
elif request.method == 'DELETE':
return cls._deleteHTTP(domain)
elif request.method == 'POST':
return cls._postHTTP()
@classmethod
def _getHTTP(cls, domain=None, relationship=None):
        # TODO add expend = True to fetch the related records instead of just their ids
        # TODO add an attribute expend = request.args.get('expend', False) to control whether relation URLs or full data are returned
"""
# Manage relashionship
relationshipcls = None
for relation in one2many[cls.__name__]:
if relation.name is relationship:
relationshipcls = relation.table
break
if relationshipcls is None and relationship is not None:
raise Core500Exception("Bad relationship '{}'.".format(relationship))
"""
# Manage request arguement "fields"
fields = request.args.get('fields', None)
if fields:
fields = fields.split(",")
if type(fields) is not list:
raise Core400Exception("'{}' : bad type for fields.".format(fields))
for name in fields:
# Fields can be col of cls or name of a relationcls or if a relationship define, col of a relationship
# if not (name in cls._columns or name in (d.name for d in one2many[cls.__name__]) or (relationshipcls and name in relationshipcls._columns)):
if not (name in cls._columns):
raise Core400Exception("Bad value '{}' in fields list.".format(name))
# Check request's arguement "count"
count = request.args.get('count', None)
if count:
try:
int(count)
except ValueError:
raise Core400Exception("'{}' : bad type for count.".format(count))
# Check request's arguement "offset"
offset = request.args.get('offset', None)
if offset:
try:
int(offset)
except ValueError:
raise Core400Exception("'{}' : bad type for offset.".format(offset))
# Check request's arguement "sort"
sort = request.args.get('sort', None)
if sort:
sort = sort.split(",")
for f in sort:
if f not in cls._columns:
raise Core400Exception("Can't sort on {}. Field doesn't exist.".format(f))
        # TODO Set resource language: read the request's 'Accept-Language' and set the response's 'Content-Language'
        # langs = cls._orderHeaderByq(request.headers['Accept-Language'])  # Returns a list of languages ordered by preference; the first item is the preferred one.
ressources = cls.search(domain, fields, count, offset, sort)
if len(ressources) == 0:
raise Core404Exception("Empty set")
data = list()
if fields:
fields += cls._identifiers
for r in ressources:
# create dict from all ressource's fields or selected field in request arg if provided
data.append(dict([(f, r._fields[f]) for f in (fields or cls._columns)]))
ctype, Converter = cls._getAcceptedContentType()
r = Response(Converter.fromDict(data), headers={"Content-type": "{};charset=UTF-8".format(ctype)})
r.status_code = 200
return r
# Ressource creation with auto id
@classmethod
def _postHTTP(cls):
dico = cls.getDataFromContentType()
ressource = cls(dico)
rid = ressource.create()
if rid and len(ressource._identifiers) < 2:
url = request.base_url + str(rid) + '/'
data = {"Location": url}
ctype, Converter = cls._getAcceptedContentType()
r = Response(Converter.fromDict(data), headers={"Content-type": "{};charset=UTF-8".format(ctype)})
else:
r = Response(None)
del r.headers['content-type']
r.status_code = 201
return r
# Ressource creation with ids provided
@classmethod
def _putHTTP(cls, domain):
dico = cls.getDataFromContentType()
if type(dico) is not dict:
raise Core400Exception("Bad content.")
ressource = cls(dico)
ressource.setIdFromDomain(domain)
ressource.create()
r = Response(None)
del r.headers['content-type']
r.status_code = 201
return r
# Update on one or several ressource
@classmethod
def _patchHTTP(cls, domain):
dico = cls.getDataFromContentType()
ressource = cls(dico)
ressource.update(dico.keys(), domain)
r = Response(None)
del r.headers['content-type']
r.status_code = 204
return r
# Delete one or several ressource
@classmethod
def _deleteHTTP(cls, domain):
cls.delete(domain)
r = Response(None)
del r.headers['content-type']
r.status_code = 204
return r
@classmethod
def _getAcceptedContentType(cls):
if 'Accept' in request.headers:
accepts = cls._orderHeaderByq(request.headers['Accept'])
for accept in accepts:
if accept in contenttype.Converter.keys():
return accept, contenttype.Converter[accept]
# Default content type is JSON
        # TODO RFC2616 sect 14.1: on a wrong 'Accept' header, return 406 (Not Acceptable); on '*' or a missing 'Accept', default to json
return "application/json", contenttype.Converter["application/json"]
@classmethod
def _orderHeaderByq(cls, header):
""" Order HTTP header by preference set with q=number
;return list: ordered list with firsts items as prefered
"""
ordered = dict()
for part in header.split(","):
subpart = part.split(";")
if len(subpart) > 1 and "q=" in subpart[1]:
try:
ordered[subpart[0].strip()] = float(subpart[1].strip()[2:])
except ValueError:
raise Core400Exception("'{}' : q must be a number.".format(subpart[1].strip()))
else:
ordered[subpart[0].strip()] = 1.0
return sorted(ordered, key=ordered.__getitem__, reverse=True)
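    # Illustrative example (not part of the original module):
    #   _orderHeaderByq("text/html;q=0.5, application/json")
    # returns ['application/json', 'text/html'], since a missing q defaults
    # to 1.0.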
@classmethod
def getDataFromContentType(cls):
# http://www.w3.org/Protocols/rfc1341/4_Content-Type.html
""" Devrais accepter que multipart/form-data, multipart/mixed,type defini dans lib.contenttype ou binaire
multipart/form-data et multipart/mixed devrais accepter que type defini dans lib.contenttype et binaire
Type binaire :
* GET|PUT /binary/<ressource>/<id1>[/<id2>]/attribute.ext
* del quand lattribut de la ressource est set a NULL (au lieux de contenir URL)
"""
        # TODO document the possible content-types and their expected body payloads
for ctype, conv in contenttype.Converter.items():
if ctype in request.headers['Content-Type']:
return conv.toDict(request.data)
if 'application/x-www-form-urlencoded' in request.headers['Content-Type']:
            # TODO handle plain POST (x-www-form-urlencoded) forms (see tests/form.html)
print request.data
return dict()
elif 'multipart/form-data' in request.headers['Content-Type']:
for val in request.form.values():
                # TODO Currently a single form attribute is sent containing a JSON with all the fields:
                # - Replace with one form attribute per field (no more content-type juggling) => see tests/form_file.html
                # - Handle the content types of the sub-parts (json, xml, file, ...) with 'multipart/mixed'
dico = contenttype.Converter['application/json'].toDict(val)
for key, val in request.files.items():
# Check field name and type
col = getattr(cls, "_{}_field".format(key), None)
if col is None or not isinstance(col, BinaryField):
raise Core400Exception("Bad binary attribute : '{}'".format(key))
binary = Binary(cls.__name__.lower(), key, val.mimetype, os.path.splitext(val.filename)[1][1:], val.stream)
dico[key] = binary
return dico
elif 'multipart/mixed' in request.headers['Content-Type']:
            # TODO Handle multipart/mixed; write a lib to handle HTTP/mail bodies:
# Extract boundary from content-type headers
# parse request.data with boundary to get dict() : {'subcontenttype1': 'data1', 'subcontenttype2':'data2', ...}
print request.data
return dict()
else:
raise Core404Exception("Forbidden Content-Type '{}'".format(request.headers['Content-Type']))
@classmethod
def _checkDomains(cls, domain):
if type(domain) is tuple:
cls._checkDomainTuple(domain)
elif type(domain) is list:
cls._checkDomainList(domain)
else:
raise Core400Exception("Invalid domain : {}, Must be list or tuple".format(str(domain)))
@classmethod
def _checkDomainList(cls, domain):
for dom in domain:
if type(dom) is str and dom in ['|', '&']:
pass
elif type(dom) is tuple:
cls._checkDomainTuple(dom)
elif type(dom) is list:
cls._checkDomainList(dom)
else:
raise Core500Exception("Invalid domain part : {}".format(str(dom)))
@classmethod
def _checkDomainTuple(cls, domain):
if len(domain) != 3:
raise Core500Exception("Invalid tuple for domain {}".format(domain))
# Check field name
if domain[0] not in cls._columns:
raise Core400Exception("Bad attribute : '{}'".format(domain[0]))
else:
# Check value type & value syntax
getattr(cls, "_{}_field".format(domain[0])).check(domain[2])
|
agpl-3.0
| 7,189,659,849,950,701,000
| 39.50519
| 158
| 0.594396
| false
| 4.259825
| false
| false
| false
|
eayunstack/oslo.messaging
|
tests/test_opts.py
|
1
|
1740
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import stevedore
import testtools
try:
from oslo.messaging import opts
except ImportError:
opts = None
from tests import utils as test_utils
class OptsTestCase(test_utils.BaseTestCase):
@testtools.skipIf(opts is None, "Options not importable")
def setUp(self):
super(OptsTestCase, self).setUp()
def _test_list_opts(self, result):
self.assertEqual(3, len(result))
groups = [g for (g, l) in result]
self.assertIn(None, groups)
self.assertIn('matchmaker_ring', groups)
self.assertIn('matchmaker_redis', groups)
opt_names = [o.name for (g, l) in result for o in l]
self.assertIn('rpc_backend', opt_names)
def test_list_opts(self):
self._test_list_opts(opts.list_opts())
def test_entry_point(self):
result = None
for ext in stevedore.ExtensionManager('oslo.config.opts',
invoke_on_load=True):
if ext.name == "oslo.messaging":
result = ext.obj
break
self.assertIsNotNone(result)
self._test_list_opts(result)
|
apache-2.0
| -172,664,514,099,144,160
| 31.222222
| 78
| 0.644828
| false
| 3.945578
| true
| false
| false
|
alexei-matveev/ccp1gui
|
interfaces/cmlatominfo.py
|
1
|
4268
|
#
# This file is part of the CCP1 Graphical User Interface (ccp1gui)
#
# (C) 2002-2007 CCLRC Daresbury Laboratory
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import sys
from xml.dom.minidom import parse
from xml.xpath import Evaluate
def CmlAtomInfo(xmlFile):
#get DOM object
xmldoc=open(xmlFile)
doc=parse(xmldoc)
#get the namespaces used in the xml document
nsMap = {}
ns_nodes = Evaluate('namespace::node()',doc.documentElement)
for ns in ns_nodes:
nsMap[ns.value]=ns.localName
#initialise objects
idlist=[]
attribDict={}
atomDict={}
#get atomArray nodes
#remember to check through all the namespaces
atomArrayNodes=doc.getElementsByTagName("atomArray")
for ns in nsMap.keys():
atomArrayNodes+=doc.getElementsByTagNameNS(ns,"atomArray")
#get the atom nodes for each atomArray node
#remember to check through all the namespaces
for atomArrayNode in atomArrayNodes:
atomNodes=atomArrayNode.getElementsByTagName("atom")
for ns in nsMap.keys():
atomNodes+=atomArrayNode.getElementsByTagNameNS(ns,"atom")
#check for the use of arrays (no 'atom' nodes)
atomArrayInfo={}
if atomNodes==[]:
atomArrayChildNodes=atomArrayNode.childNodes
for atomArrayChildNode in atomArrayChildNodes:
if atomArrayChildNode.nodeType==atomArrayChildNode.ELEMENT_NODE:
dataName=atomArrayChildNode.getAttribute('builtin')
subChildNodes=atomArrayChildNode.childNodes
for subChildNode in subChildNodes:
data=subChildNode.data.encode("ascii").split()
atomArrayInfo.update({dataName:data})
for i in range(0,len(atomArrayInfo['atomId'])):
for key in atomArrayInfo.keys():
#if key!='atomId':
attribDict.update({key:atomArrayInfo[key][i]})
#atomDict.update({atomArrayInfo['atomId'][i]:attribDict})
atomDict.update({i:attribDict})
attribDict={}
#get the attribute nodes for each atom node
i=0
for atomNode in atomNodes:
attrib=atomNode.attributes
for attribNode in attrib.values():
#if attribNode.name=="id":
# id=attribNode.value
# idlist.append(id)
#else:
attribDict.update({attribNode.name:attribNode.value.encode("ascii")})
#The following obtains data from CML-1 markup
#get the child nodes of each atom node
atomChildNodes=atomNode.childNodes
#get the data name of each child node
for atomChildNode in atomChildNodes:
if atomChildNode.nodeType==atomChildNode.ELEMENT_NODE:
dataName=atomChildNode.getAttribute("builtin")
#get the data value from the text node of each child element node
subAtomChildNodes=atomChildNode.childNodes
for subAtomChildNode in subAtomChildNodes:
if subAtomChildNode.nodeType==subAtomChildNode.TEXT_NODE:
dataValue=subAtomChildNode.data.encode("ascii")
attribDict.update({dataName:dataValue})
#atomDict.update({id:attribDict})
atomDict.update({i:attribDict})
attribDict={}
i=i+1
return atomDict
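#Example usage (a hedged sketch; 'caffeine.cml' is a hypothetical input file):
# atomDict = CmlAtomInfo('caffeine.cml')
# for index, attribs in atomDict.items():
#     print index, attribs
#The attribute keys depend on the CML dialect, e.g. 'elementType', 'x3', 'y3', 'z3'.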
|
gpl-2.0
| 4,901,287,225,231,356,000
| 38.88785
| 85
| 0.6209
| false
| 4.395469
| false
| false
| false
|
dyf102/Gomoku-online
|
server/model/game.py
|
1
|
4356
|
import uuid
EMPTY = 0
BLACK = 1
WHITE = -1
STATUS_EMPTY = 11
STATUS_WAITING = 12
STATUS_FIGHTING = 13
STATUS_SET = (STATUS_EMPTY, STATUS_WAITING, STATUS_FIGHTING)
class Game(object):
def __init__(self, host_id, host_name, guest_id=None, guest_name=None):
self.host_id = host_id
self.host_name = host_name
self.guest_id = guest_id
self.guest_name = guest_name
self.host_color = self.guest_color = 0 # undefined
self.board = [[EMPTY for _ in xrange(15)] for _ in xrange(15)]
self.status = STATUS_EMPTY
self.id = str(uuid.uuid1())
def __eq__(self, other):
return self.id == other.id
def get_id(self):
return self.id
def get_status(self):
return self.status
def is_in_game(self, uid):
return uid in (self.host_id, self.guest_id)
def set_status(self, new_status):
assert new_status in STATUS_SET
self.status = new_status
def __str__(self):
return '{} vs {}'.format(self.host_name, self.guest_name)
def set_host_black(self):
self.host_color = BLACK
self.guest_color = WHITE
def set_host_white(self):
self.host_color = WHITE
self.guest_color = BLACK
def is_win(self, x, y, color):
        # the board is 15x15, so valid indices run from 0 to 14
        if x < 0 or x >= 15 or y < 0 or y >= 15 or color == EMPTY:
return False
return self.check_x(x, y, color) or \
self.check_y(x, y, color) or \
self.check_right_diagonal(x, y, color) or \
self.check_left_diagonal(x, y, color)
    def check_x(self, x, y, color):
        board = self.board
        counter = 0
        i = x
        j = x
        # x axis
        while board[i][y] == color and board[j][y] == color:
            # reset the flags on every pass so the loop terminates once
            # neither direction can make progress
            left_move = False
            right_move = False
            if i - 1 >= 0 and board[i - 1][y] == color:
                i -= 1
                counter += 1
                left_move = True
            if j + 1 < 15 and board[j + 1][y] == color:
                j += 1
                counter += 1
                right_move = True
            if counter == 4:
                return True
            if left_move is False and right_move is False:
                break
        return False
    def check_y(self, x, y, color):
        board = self.board
        # y axis
        counter = 0
        i = j = y
        while board[x][i] == color and board[x][j] == color:
            # reset the flags on every pass so the loop terminates once
            # neither direction can make progress
            up_move = down_move = False
            if i - 1 >= 0 and board[x][i - 1] == color:
                i -= 1
                counter += 1
                up_move = True
            if j + 1 < 15 and board[x][j + 1] == color:
                j += 1
                counter += 1
                down_move = True
            if counter == 4:
                return True
            if down_move is False and up_move is False:
                break
        return False
    def check_right_diagonal(self, x, y, color):
        board = self.board
        # check the "/" diagonal; only step when the *next* cell is in
        # bounds and matches, so indices never run off the board and the
        # placed stone itself is not counted
        counter = 0
        i = j = 0
        while board[x + i][y - i] == color and board[x - j][y + j] == color:
            up_move = down_move = False
            if y - (i + 1) >= 0 and x + (i + 1) < 15 and board[x + i + 1][y - i - 1] == color:
                i += 1
                counter += 1
                up_move = True
            if y + (j + 1) < 15 and x - (j + 1) >= 0 and board[x - j - 1][y + j + 1] == color:
                j += 1
                counter += 1
                down_move = True
            if counter == 4:
                return True
            if down_move is False and up_move is False:
                break
        return False
    def check_left_diagonal(self, x, y, color):
        board = self.board
        # check the "\" diagonal (up-left and down-right of (x, y)); only
        # step when the *next* cell is in bounds and matches
        counter = 0
        i = j = 0
        while board[x - i][y - i] == color and board[x + j][y + j] == color:
            up_move = down_move = False
            if x - (i + 1) >= 0 and y - (i + 1) >= 0 and board[x - i - 1][y - i - 1] == color:
                i += 1
                counter += 1
                up_move = True
            if x + (j + 1) < 15 and y + (j + 1) < 15 and board[x + j + 1][y + j + 1] == color:
                j += 1
                counter += 1
                down_move = True
            if counter == 4:
                return True
            if down_move is False and up_move is False:
                break
        return False
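# Minimal usage sketch (hedged; the ids and names below are made up):
#   game = Game(host_id=1, host_name='alice', guest_id=2, guest_name='bob')
#   game.set_host_black()
#   game.set_status(STATUS_FIGHTING)
#   game.board[7][7] = BLACK
#   game.is_win(7, 7, BLACK)  # True only once five stones line up through (7, 7)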
|
apache-2.0
| -2,934,371,076,088,916,500
| 29.25
| 76
| 0.463728
| false
| 3.748709
| false
| false
| false
|
FluentTradeTechnologies/netconfigit
|
modules/devices/fortinet.py
|
1
|
4074
|
# -*- coding: utf-8 -*-
"""
Netconfigit Fortinet device class
"""
__license__ = "MIT License"
__author__ = "Eric Griffin"
__copyright__ = "Copyright (C) 2014, Fluent Trade Technologies"
__version__ = "1.1"
import logging
import os
import time
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Fortinet(object):
"""Fortinet device class
Defines and runs device-specific actions on a device
:param _device: the device
:param _netconfigit: the netconfigit object containing the configuration
"""
def __init__(self, _device, _netconfigit):
"""Defines action commands for the device associated with the class
:param _device: the device on which actions are being run
:param _netconfigit: the netconfigit object containing the configuration
"""
self.device = _device
self.netconfigit = _netconfigit
self.command_copy_current = "exec backup config tftp " + self.device.name + "/current-config " \
+ self.netconfigit.transfer_ip + "\n"
self.command_clear_dhcp_leases = "execute dhcp lease-clear all" + "\n"
def run_action(self, action):
"""Defines and runs actions for the device associated with the class
Checks for valid action names and runs the actions
Returns 0/1 for fail/success of the action
:param action: the action to run
"""
status = 0
connected = 0
if self.device.access_type == "ssh":
try:
self.client, self.channel = self.netconfigit.get_ssh_client_channel(self.device)
connected = 1
except:
logger.error("Error connecting to " + self.device.name)
if connected == 1:
if action == "current-config":
status = self.get_config()
elif action == "clear-dhcp-leases":
status = self.clear_dhcp_leases()
else:
logger.error("Action " + action + " not implemented for " +
self.device.manufacturer.title() + " devices.")
self.client.close()
else:
logger.error("Access method " + self.device.access_type + " not implemented for " +
self.device.manufacturer.title() + " devices.")
if status == 1:
self.netconfigit.success_list.append({self.device.name: action})
if status == 0:
self.netconfigit.failure_list.append({self.device.name: action})
def get_config(self):
"""Transfers configurations from device via ssh and tftp
Issues commands to device via ssh to transfer configs to local tftp server
:param config_type: the configuration type (ie. startup-config, running-config)
:return: boolean, 0 means transfer failed, 1 means transfer was successful
"""
output = ""
success = 0
time.sleep(5)
self.channel.send(self.command_copy_current)
while not self.channel.recv_ready():
time.sleep(7)
output += self.channel.recv(1024)
self.channel.send("\n")
while not self.channel.recv_ready():
time.sleep(12)
output += self.channel.recv(1024)
if self.netconfigit.verbose == 1:
print output
if "Send config file to tftp server OK" in output:
success = 1
if "Error" in output:
success = 0
return success
def clear_dhcp_leases(self):
"""Clears all DHCP leases
:return: boolean, 0 means failure, 1 means success
"""
output = ""
success = 0
time.sleep(5)
self.channel.send(self.command_clear_dhcp_leases)
while not self.channel.recv_ready():
time.sleep(5)
output += self.channel.recv(1024)
self.channel.send("\n")
# there is no output for this command so success is always true
success = 1
return success
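# Usage sketch (hedged; 'device' and 'netconfigit' stand in for objects that
# the netconfigit framework builds from its configuration):
#   fortinet = Fortinet(device, netconfigit)
#   fortinet.run_action("current-config")     # tftp backup of the running config
#   fortinet.run_action("clear-dhcp-leases")  # clear all DHCP leases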
|
mit
| -1,249,435,471,950,509,800
| 33.243697
| 104
| 0.583702
| false
| 4.261506
| true
| false
| false
|
ajmarks/gymnast
|
gymnast/pdf_elements/pdf_page.py
|
1
|
6340
|
"""
PDF Document Page and Page Node elements
"""
import six
from .pdf_element import PdfElement
from ..exc import PdfParseError, PdfError
from ..pdf_types import PdfType, PdfRaw, PdfArray
from ..pdf_parser import PdfParser
from ..pdf_operation import PdfOperation
def parse_page(obj):
"""Parse a page like object into the appropraite subclass"""
obj = obj.value
if obj['Type'] == 'Pages':
return PdfPageNode(obj)
elif obj['Type'] == 'Page':
return PdfPage(obj)
class PdfPageResources(PdfElement):
"""Resources dict on page objects. Technically, it's not a PDF
object type, but this lets us re-use a lot of code."""
pass
class PdfAbstractPage(PdfElement):
"""Base class for PDF Pages and Page Nodes."""
def __init__(self, page, obj_key=None, document=None):
"""Create a new or node with properly inherited properties
where applicable"""
#Common and inheritable properties
super(PdfAbstractPage, self).__init__(page, obj_key, document)
self._resources = page.get('Resources')
self._mediabox = page.get('MediaBox')
self._cropbox = page.get('CropBox')
self._rotate = page.get('Rotate')
self._fonts = None
@property
def Resources(self):
"""Page resources, most notably fonts"""
if self._resources: return PdfPageResources.from_object(self._resources)
elif self._parent: return self.Parent.Resources
else: raise PdfError('Resource dictionary not found')
@property
def MediaBox(self):
"""Size of the media"""
if self._mediabox: return self._mediabox.value
elif self._parent: return self.Parent.MediaBox
else: raise PdfError('MediaBox not found')
@property
def CropBox(self):
"""Visible area of the media"""
box = self.raw_cropbox
return box if box else self.MediaBox
@property
def raw_cropbox(self):
"""Inherited CropBox with no default value"""
if self._cropbox: return self._cropbox.value
elif self._parent: return self.Parent.raw_cropbox
else: return None
@property
def Rotate(self):
"""Rotation angle. Should be an integer multiple of 90."""
if self._rotate: return self._rotate
elif self._parent: return self.Parent.Rotate
else: return 0
@property
def Fonts(self):
"""Neatly processed dict of the page's fonts. Serves as a shortcut
to .Resources.Font"""
if self._fonts is None:
self._fonts = {k: v.parsed_object
for k,v in six.iteritems(self.Resources.Font)}
return self._fonts
@property
def unique_id(self):
"""Unique key to lookup page numbers and such in the document"""
return self.get('ID', self._obj_key)
@property
def pages_index(self):
"""Zero-indexed page number in the document"""
if self.document:
return self.document.get_page_index(self)
class PdfPageNode(PdfAbstractPage):
"""Page node object"""
def __init__(self, node, obj_key=None, document=None):
"""Create a new PdfPageNode from an object dict"""
node = node.value
if node['Type'] != 'Pages':
raise ValueError('Type "Pages" expected, got "{}"'.format(node['Type']))
super(PdfPageNode, self).__init__(node, obj_key, document)
@property
def Kids(self):
"""Child pages and nodes"""
return [p.parsed_object for p in self._object['Kids'].value]
#def __getitem__(self, key):
# return self._kids[key]
#def __contains__(self, item):
# return self._kids.__contains__(item)
#def __setitem__(self, key, value):
# return self._kids.__setitem__(key, value)
#def __delitem__(self, key, value):
# return self._kids.__delitem__(key)
#def __iter__(self):
# return self._kids.__iter__()
#def __reversed__(self):
# return self._kids.__reversed__()
#@property
#def Count(self):
# return len(self._kids)
#def __str__(self):
# return 'PdfPageNode - %d children'%self.Count
class PdfPage(PdfAbstractPage):
"""Abstract class for pages and page nodes"""
def __init__(self, page, obj_key=None, document=None):
"""Create a new PdfPage from an object dict"""
page = page.value
if page['Type'] != 'Page':
raise PdfParseError('Page dicts must have Type = "Page"')
super(PdfPage, self).__init__(page, obj_key, document)
self._contents = ContentStream(page.get('Contents', []))
self._fonts = None # Load these when they're called
@property
def Contents(self):
return self._contents
def __getattr__(self, name):
#Default values
defaults = {'BleedBox': 'MediaBox',
'TrimBox' : 'CropBox',
'ArtBox' : 'CropBox'}
try:
val = super(PdfPage, self).__getattr__(name)
except KeyError:
try:
                # the defaults map to properties (e.g. 'MediaBox'), so they
                # must be looked up with getattr rather than through __dict__
                val = getattr(self, defaults[name])
except KeyError:
raise AttributeError('Object has no attribute "{}"'.format(name))
if isinstance(val, PdfType):
return val.parsed_object
else:
return val
class ContentStream(object):
"""A page's content stream"""
def __init__(self, contents):
if not isinstance(contents, PdfArray):
contents = [contents]
self._contents = contents
@property
def operations(self):
"""Iterator over the various PDF operations in the content stream.
Each element is an instance of a subclass of PdfOperation, which can
then be rendered by the page by calling e.g. next(operations)(renderer)
where renderer is a PdfRenderer object."""
for stream in self._contents:
for oper in self._extract_stream_ops(stream):
yield oper
@staticmethod
def _extract_stream_ops(stream):
operands = []
for op in PdfParser().iterparse(stream.value.data):
if isinstance(op, PdfRaw):
yield PdfOperation[op](*operands)
operands = []
else:
operands.append(op)
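# Usage sketch (hedged; 'page' stands for a parsed PdfPage and 'renderer' for a
# PdfRenderer, as described in the operations docstring above):
#   for oper in page.Contents.operations:
#       oper(renderer)  # execute each content-stream operation in order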
|
mit
| -6,712,480,742,190,932,000
| 35.647399
| 84
| 0.594322
| false
| 3.997478
| false
| false
| false
|
gutow/sagenb_slickgrid
|
sagenb/notebook/cell.py
|
1
|
82283
|
# -*- coding: utf-8 -*-
"""
A Cell
A cell is a single input/output block. Worksheets are built out of
a list of cells.
"""
###########################################################################
# Copyright (C) 2006 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
###########################################################################
import os
import re
import shutil
import textwrap
import time
from cgi import escape
from sagenb.misc.misc import (word_wrap, strip_string_literals,
set_restrictive_permissions, unicode_str,
encoded_str)
from interact import (INTERACT_RESTART, INTERACT_UPDATE_PREFIX,
INTERACT_TEXT, INTERACT_HTML)
from flask import g #we need the site name to build paths
# Maximum number of characters allowed in output. This is needed to
# avoid overloading the web browser. For example, it should be possible
# to gracefully survive:
# while True:
# print "hello world"
# On the other hand, we don't want to lose the output of big matrices
# and numbers, so don't make this too small.
MAX_OUTPUT = 32000
MAX_OUTPUT_LINES = 120
# Used to detect and format tracebacks. See :func:`format_exception`.
TRACEBACK = 'Traceback (most recent call last):'
# This regexp matches "cell://blah..." in a non-greedy way (the ?), so
# we don't get several of these combined in one.
re_cell = re.compile('"cell://.*?"')
re_cell_2 = re.compile("'cell://.*?'") # same, but with single quotes
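# Example (hedged sketch): re_cell.findall('"cell://a.png" and "cell://b.png"')
# yields ['"cell://a.png"', '"cell://b.png"'] -- the non-greedy .*? keeps the
# two URLs from merging into a single match.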
# Matches script blocks.
re_script = re.compile(r'<script[^>]*?>.*?</script>', re.DOTALL | re.I)
# Whether to enable editing of :class:`TextCell`s with TinyMCE.
JEDITABLE_TINYMCE = True
###########################
# Generic (abstract) cell #
###########################
class Cell_generic(object):
def __init__(self, id, worksheet):
"""
Creates a new generic cell.
INPUT:
- ``id`` - an integer or string; this cell's ID
- ``worksheet`` - a
:class:`sagenb.notebook.worksheet.Worksheet` instance; this
cell's parent worksheet
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, None)
sage: isinstance(C, sagenb.notebook.cell.Cell_generic)
True
sage: isinstance(C, sagenb.notebook.cell.TextCell)
False
sage: isinstance(C, sagenb.notebook.cell.Cell)
False
"""
try:
self._id = int(id)
except ValueError:
self._id = id
self._worksheet = worksheet
def __repr__(self):
"""
Returns a string representation of this generic cell.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, None)
sage: C.__repr__()
'Cell_generic 0'
"""
return "Cell_generic %s" % self._id
def __cmp__(self, right):
"""
Compares generic cells by ID.
INPUT:
- ``right`` - a :class:`Cell_generic` instance; the cell to
compare to this cell
OUTPUT:
- a boolean
EXAMPLES::
sage: C1 = sagenb.notebook.cell.Cell_generic(0, None)
sage: C2 = sagenb.notebook.cell.Cell_generic(1, None)
sage: C3 = sagenb.notebook.cell.Cell_generic(0, None)
sage: [C1 == C2, C1 == C3, C2 == C3]
[False, True, False]
sage: C1 = sagenb.notebook.cell.TextCell('bagel', 'abc', None)
sage: C2 = sagenb.notebook.cell.TextCell('lox', 'abc', None)
sage: C3 = sagenb.notebook.cell.TextCell('lox', 'xyz', None)
sage: [C1 == C2, C1 == C3, C2 == C3]
[False, False, True]
sage: C1 = sagenb.notebook.cell.Cell(7, '3+2', '5', None)
sage: C2 = sagenb.notebook.cell.Cell(7, '3+2', 'five', None)
sage: C3 = sagenb.notebook.cell.Cell('7', '2+3', '5', None)
sage: [C1 == C2, C1 == C3, C2 == C3]
[True, True, True]
"""
return cmp(self.id(), right.id())
def id(self):
"""
Returns this generic cell's ID.
OUTPUT:
- an integer or string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, None)
sage: C.id()
0
sage: C = sagenb.notebook.cell.Cell('blue', '2+3', '5', None)
sage: C.id()
'blue'
sage: C = sagenb.notebook.cell.TextCell('yellow', '2+3', None)
sage: C.id()
'yellow'
"""
return self._id
def set_id(self, id):
"""
Sets this generic cell's ID.
INPUT:
- ``id`` - an integer or string; the new ID
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, None)
sage: C.id()
0
sage: C.set_id('phone')
sage: C.id()
'phone'
"""
try:
self._id = int(id)
except ValueError:
self._id = id
def proxied_id(self):
"""
Returns the ID of the cell for which this generic cell is a
proxy. If this cell does not have such an ID, it returns the
cell's own ID.
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic('self_stand_in', None)
sage: [C.id(), C.proxied_id()]
['self_stand_in', 'self_stand_in']
"""
try:
return self._proxied_id
except AttributeError:
return self._id
def set_proxied_id(self, proxied_id):
"""
Sets, for this generic cell, the ID of the cell that it
proxies.
INPUT:
- ``proxied_id`` - an integer or string; the proxied cell's ID
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic('understudy', None)
sage: [C.id(), C.proxied_id()]
['understudy', 'understudy']
sage: C.set_proxied_id('principal')
sage: [C.id(), C.proxied_id()]
['understudy', 'principal']
"""
self._proxied_id = proxied_id
def worksheet(self):
"""
Returns this generic cell's worksheet object.
OUTPUT:
- a :class:`sagenb.notebook.worksheet.Worksheet` instance
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, 'worksheet object')
sage: C.worksheet()
'worksheet object'
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.worksheet() is W
True
sage: nb.delete()
"""
return self._worksheet
def set_worksheet(self, worksheet, id=None):
"""
Sets this generic cell's worksheet object and, optionally, its
ID.
INPUT:
- ``worksheet`` - a
:class:`sagenb.notebook.worksheet.Worksheet` instance; the
cell's new worksheet object
- ``id`` - an integer or string (default: None); the cell's
new ID
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell_generic(0, None)
sage: W = "worksheet object"
sage: C.set_worksheet(W)
sage: C.worksheet()
'worksheet object'
"""
self._worksheet = worksheet
if id is not None:
self.set_id(id)
def worksheet_filename(self):
"""
Returns the filename of this generic cell's worksheet object.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.worksheet_filename()
'sage/0'
sage: nb.delete()
"""
return self._worksheet.filename()
def notebook(self):
"""
Returns this generic cell's associated notebook object.
OUTPUT:
- a :class:`sagenb.notebook.notebook.Notebook` instance
EXAMPLES::
sage: nb = sagenb.notebook.notebook.load_notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.notebook() is nb
True
sage: nb.delete()
"""
return self._worksheet.notebook()
def is_last(self):
"""
Returns whether this generic cell is the last cell in its
worksheet object.
OUTPUT:
- a boolean
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "2^2"); C
Cell 2: in=2^2, out=
sage: C.is_last()
True
sage: C = W.get_cell_with_id(0)
sage: C.is_last()
False
sage: nb.delete()
"""
return self._worksheet.cell_list()[-1] == self
def next_id(self):
"""
Returns the ID of the next cell in this generic cell's
worksheet object. If this cell is *not* in the worksheet, it
returns the ID of the worksheet's *first* cell. If this *is*
the last cell, it returns its *own* ID.
OUTPUT:
- an integer or string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(1, "2^2")
sage: C = W.get_cell_with_id(1)
sage: C.next_id()
2
sage: C = W.get_cell_with_id(2)
sage: C.next_id()
2
sage: nb.delete()
"""
L = self._worksheet.cell_list()
try:
k = L.index(self)
except ValueError:
print "Warning -- cell %s no longer exists" % self.id()
return L[0].id()
try:
return L[k + 1].id()
except IndexError:
return self.id()
def is_text_cell(self):
"""
Returns whether this generic cell is a text cell, i.e., an
instance of :class:`TextCell`.
OUTPUT:
- a boolean
EXAMPLES::
sage: G = sagenb.notebook.cell.Cell_generic(0, None)
sage: T = sagenb.notebook.cell.TextCell(0, 'hello!', None)
sage: C = sagenb.notebook.cell.Cell(0, '2+4', '6', None)
sage: [X.is_text_cell() for X in (G, T, C)]
[False, True, False]
"""
return isinstance(self, TextCell)
def is_compute_cell(self):
"""
Returns whether this generic cell is a compute cell, i.e., an
instance of :class:`Cell`.
OUTPUT:
- a boolean
EXAMPLES::
sage: G = sagenb.notebook.cell.Cell_generic(0, None)
sage: T = sagenb.notebook.cell.TextCell(0, 'hello!', None)
sage: C = sagenb.notebook.cell.Cell(0, '2+4', '6', None)
sage: [X.is_compute_cell() for X in (G, T, C)]
[False, False, True]
"""
return isinstance(self, Cell)
def is_auto_cell(self):
"""
Returns whether this is an automatically evaluated generic
cell. This is always false for :class:`Cell_generic`\ s and
:class:`TextCell`\ s.
OUTPUT:
- a boolean
EXAMPLES::
sage: G = sagenb.notebook.cell.Cell_generic(0, None)
sage: T = sagenb.notebook.cell.TextCell(0, 'hello!', None)
sage: [X.is_auto_cell() for X in (G, T)]
[False, False]
"""
return False
def is_interactive_cell(self):
"""
Returns whether this generic cell uses
:func:`sagenb.notebook.interact.interact` as a function call
or decorator.
OUTPUT:
- a boolean
EXAMPLES::
sage: G = sagenb.notebook.cell.Cell_generic(0, None)
sage: T = sagenb.notebook.cell.TextCell(0, 'hello!', None)
sage: [X.is_auto_cell() for X in (G, T)]
[False, False]
"""
return False
#############
# Text cell #
#############
class TextCell(Cell_generic):
def __init__(self, id, text, worksheet):
"""
Creates a new text cell.
INPUT:
- ``id`` - an integer or string; this cell's ID
- ``text`` - a string; this cell's contents
- ``worksheet`` - a
:class:`sagenb.notebook.worksheet.Worksheet` instance; this
cell's parent worksheet
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C == loads(dumps(C))
True
"""
text = unicode_str(text)
self._text = text
super(TextCell, self).__init__(id, worksheet)
def __repr__(self):
"""
Returns a string representation of this text cell.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C.__repr__()
'TextCell 0: 2+3'
"""
return "TextCell %s: %s" % (self._id, encoded_str(self._text))
def delete_output(self):
"""
Delete all output in this text cell. This does nothing since
text cells have no output.
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C
TextCell 0: 2+3
sage: C.delete_output()
sage: C
TextCell 0: 2+3
"""
pass # nothing to do -- text cells have no output
def set_input_text(self, input_text):
"""
Sets the input text of this text cell.
INPUT:
- ``input_text`` - a string; the new input text for this cell
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C
TextCell 0: 2+3
sage: C.set_input_text("3+2")
sage: C
TextCell 0: 3+2
"""
input_text = unicode_str(input_text)
self._text = input_text
def html(self, wrap=None, div_wrap=True, do_print=False,
editing=False, publish=False):
"""
Returns HTML code for this text cell, including its contents
and associated script elements.
INPUT:
- ``wrap`` -- an integer (default: None); number of columns to
wrap at (not used)
- ``div_wrap`` -- a boolean (default: True); whether to wrap
in a div (not used)
- ``do_print`` - a boolean (default: False); whether to render the
cell for printing
- ``editing`` - a boolean (default: False); whether to open an
editor for this cell
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', W)
sage: C.html()
u'...text_cell...2+3...'
sage: C.set_input_text("$2+3$")
"""
from template import template
return template(os.path.join('html', 'notebook', 'text_cell.html'),
cell = self, wrap = wrap, div_wrap = div_wrap,
do_print = do_print,
editing = editing, publish = publish)
def plain_text(self, prompts=False):
ur"""
Returns a plain text version of this text cell.
INPUT:
- ``prompts`` - a boolean (default: False); whether to strip
interpreter prompts from the beginning of each line
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C.plain_text()
u'2+3'
sage: C = sagenb.notebook.cell.TextCell(0, 'ěščřžýáíéďĎ', None)
sage: C.plain_text()
u'\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e'
"""
return self._text
def edit_text(self):
"""
Returns the text to be displayed for this text cell in the
Edit window.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C.edit_text()
u'2+3'
"""
return self._text
def set_cell_output_type(self, typ='wrap'):
"""
Sets this text cell's output type. This does nothing for
:class:`TextCell`\ s.
INPUT:
- ``typ`` - a string (default: 'wrap'); the target output type
EXAMPLES::
sage: C = sagenb.notebook.cell.TextCell(0, '2+3', None)
sage: C.set_cell_output_type("wrap")
"""
pass # ignored
################
# Compute cell #
################
class Cell(Cell_generic):
def __init__(self, id, input, out, worksheet):
"""
Creates a new compute cell.
INPUT:
- ``id`` - an integer or string; the new cell's ID
- ``input`` - a string; this cell's input
- ``out`` - a string; this cell's output
- ``worksheet`` - a
:class:`sagenb.notebook.worksheet.Worksheet` instance; this
cell's worksheet object
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C == loads(dumps(C))
True
"""
out = unicode_str(out)
input = unicode_str(input)
super(Cell, self).__init__(id, worksheet)
self._out = out.replace('\r', '')
self._interrupted = False
self.has_new_output = False
self._asap = False
self.set_input_text(input)
# start with a random integer so that evaluations of the cell
# from different runs have different version numbers.
from sys import maxint
from random import randint
self._version = randint(0,maxint)
def __repr__(self):
"""
Returns a string representation of this compute cell.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None); C
Cell 0: in=2+3, out=5
"""
return 'Cell %s: in=%s, out=%s' % (self.id(), self._in, self._out)
def is_asap(self):
"""
Returns whether this compute cell is to be evaluated as soon
as possible (ASAP).
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.is_asap()
False
sage: C.set_asap(True)
sage: C.is_asap()
True
"""
try:
return self._asap
except AttributeError:
self._asap = False
return self._asap
def set_asap(self, asap):
"""
Sets whether to evaluate this compute cell as soon as possible
(ASAP).
INPUT:
- ``asap`` - a boolean convertible
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.is_asap()
False
sage: C.set_asap(True)
sage: C.is_asap()
True
"""
self._asap = bool(asap)
def delete_output(self):
r"""
Deletes all output in this compute cell. This also deletes the
files, since they appear as output of the cell.
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None); C
Cell 0: in=2+3, out=5
sage: C.delete_output()
sage: C
Cell 0: in=2+3, out=
When output is deleted, any files in the cell directory are deleted as well::
sage: nb = sagenb.notebook.notebook.load_notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: W.edit_save('{{{\nplot(sin(x),(x,0,5))\n///\n20\n}}}')
sage: C = W.cell_list()[0]
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0; in=plot(sin(x),(x,0,5)), out=
<html><font color='black'><img src='cell://sage0.png'></font></html>
<BLANKLINE>
)
sage: C.files() # random output -- depends on computer speed
['sage0.png']
sage: C.delete_output()
sage: C.files()
[]
sage: W.quit()
sage: nb.delete()
"""
self._out = u''
self._out_html = u''
self._evaluated = False
self.delete_files()
def evaluated(self):
r"""
Returns whether this compute cell has been successfully
evaluated in a currently running session. This is not about
whether the output of the cell is valid given the input.
OUTPUT:
- a boolean
EXAMPLES: We create a worksheet with a cell that has wrong output::
sage: nb = sagenb.notebook.notebook.load_notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: W.edit_save('{{{\n2+3\n///\n20\n}}}')
sage: C = W.cell_list()[0]
sage: C
Cell 0: in=2+3, out=
20
We re-evaluate that input cell::
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('w', Cell 0: in=2+3, out=)
Now the output is right::
sage: C # random output -- depends on computer speed
Cell 0: in=2+3, out=
And the cell is considered to have been evaluated.
::
sage: C.evaluated() # random output -- depends on computer speed
True
::
sage: W.quit()
sage: nb.delete()
"""
# Cells are never considered evaluated in a new session.
if not self.worksheet().compute_process_has_been_started():
self._evaluated = False
return False
# Figure out if the worksheet is using the same sage
# session as this cell. (I'm not sure when this would
# be False.)
same_session = self.worksheet().sage() is self.sage()
try:
# Always not evaluated if sessions are different.
if not same_session:
self._evaluated = False
return False
return self._evaluated
except AttributeError:
# Default assumption is that cell has not been evaluated.
self._evaluated = False
return False
def set_no_output(self, no_output):
"""
Sets whether this is a "no output" compute cell, i.e., we
don't care about its output.
INPUT:
- ``no_output`` - a boolean convertible
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.is_no_output()
False
sage: C.set_no_output(True)
sage: C.is_no_output()
True
"""
self._no_output = bool(no_output)
def is_no_output(self):
"""
Returns whether this is a "no output" compute cell, i.e., we
don't care about its output.
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.is_no_output()
False
sage: C.set_no_output(True)
sage: C.is_no_output()
True
"""
try:
return self._no_output
except AttributeError:
self._no_output = False
return self._no_output
def cell_output_type(self):
"""
Returns this compute cell's output type.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.cell_output_type()
'wrap'
sage: C.set_cell_output_type('nowrap')
sage: C.cell_output_type()
'nowrap'
"""
try:
return self._type
except AttributeError:
self._type = 'wrap'
return self._type
def set_cell_output_type(self, typ='wrap'):
"""
Sets this compute cell's output type.
INPUT:
- ``typ`` - a string (default: 'wrap'); the target output type
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.cell_output_type()
'wrap'
sage: C.set_cell_output_type('nowrap')
sage: C.cell_output_type()
'nowrap'
"""
self._type = typ
def update_html_output(self, output=''):
"""
        Updates this compute cell's file list with HTML-style
links or embeddings.
For interactive cells, the HTML output section is always
empty, mainly because there is no good way to distinguish
content (e.g., images in the current directory) that goes into
the interactive template and content that would go here.
INPUT:
- ``output`` - a string (default: ''); the new output
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0: in=plot(sin(x),0,5), out=
<html><font color='black'><img src='cell://sage0.png'></font></html>
<BLANKLINE>
)
sage: C.update_html_output()
sage: C.output_html() # random output -- depends on computer speed
'<img src="/home/sage/0/cells/0/sage0.png?...">'
sage: W.quit()
sage: nb.delete()
"""
if self.is_interactive_cell():
self._out_html = u""
else:
self._out_html = self.files_html(output)
def directory(self):
"""
Returns the name of this compute cell's directory, creating
it, if it doesn't already exist.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.directory()
'.../home/sage/0/cells/0'
sage: nb.delete()
"""
dir = self._directory_name()
if not os.path.exists(dir):
os.makedirs(dir)
set_restrictive_permissions(dir)
return dir
def _directory_name(self):
"""
Returns the name of this compute cell's directory.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C._directory_name()
'.../home/sage/0/cells/0'
sage: nb.delete()
"""
return os.path.join(self._worksheet.directory(), 'cells',
str(self.id()))
def word_wrap_cols(self):
"""
Returns the number of columns for word wrapping this compute
cell. This defaults to 70, but the default setting for a
notebook is 72.
OUTPUT:
- an integer
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.word_wrap_cols()
70
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.word_wrap_cols()
72
sage: nb.delete()
"""
try:
return self.notebook().conf()['word_wrap_cols']
except AttributeError:
return 70
def plain_text(self, ncols=0, prompts=True, max_out=None):
r"""
Returns the plain text version of this compute cell.
INPUT:
- ``ncols`` - an integer (default: 0); the number of word wrap
columns
- ``prompts`` - a boolean (default: False); whether to strip
interpreter prompts from the beginning of each line
- ``max_out`` - an integer (default: None); the maximum number
of characters to return
OUTPUT:
- ``plaintext_output`` - Plaintext string of the cell
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: len(C.plain_text())
11
"""
if ncols == 0:
ncols = self.word_wrap_cols()
plaintext_output = u''
self._in = unicode_str(self._in)
input_lines = self._in
pr = 'sage: '
if prompts:
input_lines = input_lines.splitlines()
has_prompt = False
if pr == 'sage: ':
for v in input_lines:
w = v.lstrip()
if w[:5] == 'sage:' or w[:3] == '>>>' or w[:3] == '...':
has_prompt = True
break
else:
# discard first line since it sets the prompt
input_lines = input_lines[1:]
if has_prompt:
plaintext_output += '\n'.join(input_lines) + '\n'
else:
in_loop = False
for v in input_lines:
if len(v) == 0:
pass
elif len(v.lstrip()) != len(v): # starts with white space
in_loop = True
plaintext_output += '... ' + v + '\n'
elif v[:5] == 'else:':
in_loop = True
plaintext_output += '... ' + v + '\n'
else:
if in_loop:
plaintext_output += '...\n'
in_loop = False
plaintext_output += pr + v + '\n'
else:
plaintext_output += self._in
if prompts:
msg = TRACEBACK
if self._out.strip().startswith(msg):
v = self._out.strip().splitlines()
w = [msg, '...']
for i in range(1, len(v)):
if not (len(v[i]) > 0 and v[i][0] == ' '):
w = w + v[i:]
break
out = '\n'.join(w)
else:
out = self.output_text(ncols, raw=True, html=False)
else:
out = self.output_text(ncols, raw=True, html=False,
allow_interact=False)
out = '///\n' + out.strip('\n')
if not max_out is None and len(out) > max_out:
out = out[:max_out] + '...'
# Get rid of spurious carriage returns
plaintext_output = plaintext_output.strip('\n')
out = out.strip('\r\n')
plaintext_output = plaintext_output + '\n' + out
if not prompts:
plaintext_output = plaintext_output.rstrip('\n')
return plaintext_output
def edit_text(self, ncols=0, prompts=False, max_out=None):
ur"""
Returns the text displayed for this compute cell in the Edit
window.
INPUT:
- ``ncols`` - an integer (default: 0); the number of word wrap
columns
- ``prompts`` - a boolean (default: False); whether to strip
interpreter prompts from the beginning of each line
- ``max_out`` - an integer (default: None); the maximum number
of characters to return
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.edit_text()
u'{{{id=0|\n2+3\n///\n5\n}}}'
sage: C = sagenb.notebook.cell.Cell(0, 'ěščřžýáíéďĎ', 'ěščřžýáíéďĎ', None)
sage: C.edit_text()
u'{{{id=0|\n\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e\n///\n\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e\n}}}'
"""
s = self.plain_text(ncols, prompts, max_out)
return u'{{{id=%s|\n%s\n}}}' % (self.id(), s)
def next_compute_id(self):
r"""
Returns the ID of the next compute cell in this compute cell's
worksheet object. If this cell is *not* in the worksheet, it
returns the ID of the worksheet's *first* compute cell. If
this *is* the last compute cell, it returns its *own* ID.
OUTPUT:
- an integer or string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: W.edit_save('foo\n{{{\n2+3\n///\n5\n}}}bar\n{{{\n2+8\n///\n10\n}}}')
sage: W.new_cell_after(1, "2^2")
Cell 4: in=2^2, out=
sage: [W.get_cell_with_id(i).next_compute_id() for i in [1, 4, 3]]
[4, 3, 3]
"""
L = self.worksheet().compute_cell_list()
try:
k = L.index(self)
except ValueError:
print "Warning -- cell %s no longer exists" % self.id()
return L[0].id()
try:
return L[k + 1].id()
except IndexError:
return self.id()
def interrupt(self):
"""
Sets this compute cell's evaluation as interrupted.
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "2^2")
sage: C.interrupt()
sage: C.interrupted()
True
sage: C.evaluated()
False
sage: nb.delete()
"""
self._interrupted = True
self._evaluated = False
def interrupted(self):
"""
Returns whether this compute cell's evaluation has been
interrupted.
OUTPUT:
- a boolean
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "2^2")
sage: C.interrupt()
sage: C.interrupted()
True
sage: nb.delete()
"""
return self._interrupted
def computing(self):
"""
Returns whether this compute cell is queued for evaluation by
its worksheet object.
OUTPUT:
- a boolean
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "2^2")
sage: C.computing()
False
sage: nb.delete()
"""
return self in self.worksheet().queue()
def is_interactive_cell(self):
r"""
Returns whether this compute cell contains
:func:`sagenb.notebook.interact.interact` either as a function
call or decorator.
OUTPUT:
- a boolean
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "@interact\ndef f(a=slider(0,10,1,5):\n print a^2")
sage: C.is_interactive_cell()
True
sage: C = W.new_cell_after(C.id(), "2+2")
sage: C.is_interactive_cell()
False
sage: nb.delete()
"""
# Do *not* cache
s = strip_string_literals(self.input_text())
if len(s) == 0:
return False
s = s[0]
return bool(re.search('(?<!\w)interact\s*\(.*\).*', s) or
re.search('\s*@\s*interact', s))
def is_interacting(self):
r"""
Returns whether this compute cell is currently
:func:`sagenb.notebook.interact.interact`\ ing.
OUTPUT:
- a boolean
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "@interact\ndef f(a=slider(0,10,1,5):\n print a^2")
sage: C.is_interacting()
False
"""
return hasattr(self, 'interact')
def stop_interacting(self):
"""
Stops :func:`sagenb.notebook.interact.interact`\ ion for this
compute cell.
TODO: Add doctests.
"""
if self.is_interacting():
del self.interact
def set_input_text(self, input):
"""
Sets the input text of this compute cell.
INPUT:
- ``input`` - a string; the new input text
TODO: Add doctests for the code dealing with interact.
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = W.new_cell_after(0, "2^2")
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 1: in=2^2, out=
4
)
sage: initial_version=C.version()
sage: C.set_input_text('3+3')
sage: C.input_text()
u'3+3'
sage: C.evaluated()
False
sage: C.version()-initial_version
1
sage: W.quit()
sage: nb.delete()
"""
# Stuff to deal with interact
input = unicode_str(input)
if input.startswith(INTERACT_UPDATE_PREFIX):
self.interact = input[len(INTERACT_UPDATE_PREFIX)+1:]
self._version = self.version() + 1
return
elif self.is_interacting():
try:
del self.interact
del self._interact_output
except AttributeError:
pass
# We have updated the input text so the cell can't have
# been evaluated.
self._evaluated = False
self._version = self.version() + 1
self._in = input
if hasattr(self, '_html_cache'):
del self._html_cache
        #Get the input text with all of the percent
        #directives parsed out
self._cleaned_input = self.parse_percent_directives()
def input_text(self):
"""
Returns this compute cell's input text.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.input_text()
u'2+3'
"""
return self._in
def cleaned_input_text(self):
r"""
Returns this compute cell's "cleaned" input text, i.e., its
input with all of its percent directives removed. If this
cell is interacting, it returns the interacting text.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '%hide\n%maxima\n2+3', '5', None)
sage: C.cleaned_input_text()
u'2+3'
"""
if self.is_interacting():
return self.interact
else:
return self._cleaned_input
def parse_percent_directives(self):
r"""
Parses this compute cell's percent directives, determines its
system (if any), and returns the "cleaned" input text.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '%hide\n%maxima\n%pi+3', '5', None)
sage: C.parse_percent_directives()
u'%pi+3'
sage: C.percent_directives()
[u'hide', u'maxima']
"""
self._system = None
text = self.input_text().splitlines()
directives = []
i = 0
for i, line in enumerate(text):
line = line.strip()
if not line.startswith('%'):
#Handle the #auto case here for now
if line == "#auto":
directives.append(line[1:])
else:
break
elif line in ['%auto', '%hide', '%hideall', '%save_server',
'%time', '%timeit']:
# We do not consider any of the above percent
# directives as specifying a system.
directives.append(line[1:])
else:
self._system = line[1:]
directives.append(line[1:])
i += 1
break
self._percent_directives = directives
if not self._system == 'fortran':
return "\n".join(text[i:]).strip()
return "\n".join(text[i:]).rstrip()
def percent_directives(self):
r"""
Returns a list of this compute cell's percent directives.
OUTPUT:
- a list of strings
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '%hide\n%maxima\n2+3', '5', None)
sage: C.percent_directives()
[u'hide', u'maxima']
"""
try:
return self._percent_directives
except AttributeError:
self._percent_directives = []
return []
def system(self):
r"""
Returns the system used to evaluate this compute cell. The
system is specified by a percent directive like '%maxima' at
the top of a cell.
Returns None, if no system is explicitly specified. In this
case, the notebook evaluates the cell using the worksheet's
default system.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '%maxima\n2+3', '5', None)
sage: C.system()
u'maxima'
sage: prefixes = ['%hide', '%time', '']
sage: cells = [sagenb.notebook.cell.Cell(0, '%s\n2+3'%prefix, '5', None) for prefix in prefixes]
sage: [(C, C.system()) for C in cells if C.system() is not None]
[]
"""
self.parse_percent_directives()
return self._system
def is_auto_cell(self):
r"""
Returns whether this compute cell is evaluated automatically
when its worksheet object starts up.
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.is_auto_cell()
False
sage: C = sagenb.notebook.cell.Cell(0, '#auto\n2+3', '5', None)
sage: C.is_auto_cell()
True
"""
return 'auto' in self.percent_directives()
def changed_input_text(self):
"""
Returns the changed input text for this compute cell, deleting
any previously stored text.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: initial_version=C.version()
sage: C.changed_input_text()
''
sage: C.set_changed_input_text('3+3')
sage: C.input_text()
u'3+3'
sage: C.changed_input_text()
u'3+3'
sage: C.changed_input_text()
''
sage: C.version()-initial_version
0
"""
try:
t = self._changed_input
del self._changed_input
return t
except AttributeError:
return ''
def set_changed_input_text(self, new_text):
"""
Updates this compute cell's changed input text. Note: This
does not update the version of the cell. It's typically used,
e.g., for tab completion.
INPUT:
- ``new_text`` - a string; the new changed input text
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.set_changed_input_text('3+3')
sage: C.input_text()
u'3+3'
sage: C.changed_input_text()
u'3+3'
"""
new_text = unicode_str(new_text)
self._changed_input = new_text
self._in = new_text
def set_output_text(self, output, html, sage=None):
r"""
Sets this compute cell's output text.
INPUT:
- ``output`` - a string; the updated output text
- ``html`` - a string; updated output HTML
- ``sage`` - a :class:`sage` instance (default: None); the
sage instance to use for this cell(?)
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: len(C.plain_text())
11
sage: C.set_output_text('10', '10')
sage: len(C.plain_text())
12
"""
output = unicode_str(output)
html = unicode_str(html)
if output.count(INTERACT_TEXT) > 1:
html = u'<h3><font color="red">WARNING: multiple @interacts in one cell disabled (not yet implemented).</font></h3>'
output = u''
# In interacting mode, we just save the computed output
# (do not overwrite).
if self.is_interacting():
self._interact_output = (output, html)
if INTERACT_RESTART in output:
# We forfeit any interact output template (in
# self._out), so that the restart message propagates
# out. When the set_output_text function in
# notebook_lib.js gets the message, it should
# re-evaluate the cell from scratch.
self._out = output
return
if hasattr(self, '_html_cache'):
del self._html_cache
output = output.replace('\r', '')
# We do not truncate if "notruncate" or "Output truncated!" already
# appears in the output. This notruncate tag is used right now
# in sagenb.notebook.interact, sage.misc.html, and sage.database.sql_db.
if ('notruncate' not in output and
'Output truncated!' not in output
and
(len(output) > MAX_OUTPUT or
output.count('\n') > MAX_OUTPUT_LINES)):
url = ""
if not self.computing():
file = os.path.join(self.directory(), "full_output.txt")
open(file, "w").write(encoded_str(output))
url = "<a target='_new' href='%s/full_output.txt' class='file_link'>full_output.txt</a>" % (
self.url_to_self())
html += "<br>" + url
lines = output.splitlines()
start = '\n'.join(lines[:MAX_OUTPUT_LINES/2])[:MAX_OUTPUT/2]
end = '\n'.join(lines[-MAX_OUTPUT_LINES/2:])[-MAX_OUTPUT/2:]
warning = 'WARNING: Output truncated! '
if url:
# make the link to the full output appear at the top too.
warning += '\n<html>%s</html>\n' % url
output = warning + '\n\n' + start + '\n\n...\n\n' + end
self._out = output
if not self.is_interactive_cell():
self._out_html = html
self._sage = sage
def sage(self):
"""
Returns the :class:`sage` instance for this compute cell(?).
OUTPUT:
- an instance of :class:`sage`
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.sage() is None
True
"""
try:
return self._sage
except AttributeError:
return None
def output_html(self):
"""
Returns this compute cell's HTML output.
OUTPUT:
- a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.output_html()
''
sage: C.set_output_text('5', '<strong>5</strong>')
sage: C.output_html()
u'<strong>5</strong>'
"""
try:
return self._out_html
except AttributeError:
self._out_html = ''
return ''
def process_cell_urls(self, urls):
"""
Processes this compute cell's ``'cell://.*?'`` URLs, replacing
the protocol with the cell's path and appending the version number
to prevent cached copies from shadowing the updated copy.
INPUT:
- ``urls`` - a string; the URLs to process
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.process_cell_urls('"cell://foobar"')
'/sage/home/sage/0/cells/0/foobar?...'
"""
end = '?%d' % self.version()
begin = self.url_to_self()
for s in re_cell.findall(urls) + re_cell_2.findall(urls):
urls = urls.replace(s, begin + s[7:-1] + end)
return urls
def output_text(self, ncols=0, html=True, raw=False, allow_interact=True):
ur"""
Returns this compute cell's output text.
INPUT:
- ``ncols`` - an integer (default: 0); the number of word wrap
columns
- ``html`` - a boolean (default: True); whether to output HTML
- ``raw`` - a boolean (default: False); whether to output raw
text (takes precedence over HTML)
- ``allow_interact`` - a boolean (default: True); whether to
allow :func:`sagenb.notebook.interact.interact`\ ion
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.output_text()
u'<pre class="shrunk">5</pre>'
sage: C.output_text(html=False)
u'<pre class="shrunk">5</pre>'
sage: C.output_text(raw=True)
u'5'
sage: C = sagenb.notebook.cell.Cell(0, 'ěščřžýáíéďĎ', 'ěščřžýáíéďĎ', W)
sage: C.output_text()
u'<pre class="shrunk">\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e</pre>'
sage: C.output_text(raw=True)
u'\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e'
"""
if allow_interact and hasattr(self, '_interact_output'):
# Get the input template
z = self.output_text(ncols, html, raw, allow_interact=False)
            if INTERACT_TEXT not in z or INTERACT_HTML not in z:
return z
if ncols:
# Get the output template
try:
# Fill in the output template
output, html = self._interact_output
output = self.parse_html(output, ncols)
z = z.replace(INTERACT_TEXT, output)
z = z.replace(INTERACT_HTML, html)
return z
                except (ValueError, AttributeError), msg:
                    print msg
else:
# Get rid of the interact div to avoid updating the
# wrong output location during interact.
return ''
self._out = unicode_str(self._out)
is_interact = self.is_interactive_cell()
if is_interact and ncols == 0:
if 'Traceback (most recent call last)' in self._out:
s = self._out.replace('cell-interact', '')
is_interact = False
else:
return u'<h2>Click to the left again to hide and once more to show the dynamic interactive window</h2>'
else:
s = self._out
if raw:
return s
if html:
s = self.parse_html(s, ncols)
if (not is_interact and not self.is_html() and len(s.strip()) > 0 and
'<div class="docstring">' not in s):
s = '<pre class="shrunk">' + s.strip('\n') + '</pre>'
return s.strip('\n')
def parse_html(self, s, ncols):
r"""
Parses HTML for output, escaping and wrapping HTML and
removing script elements.
INPUT:
- ``s`` - a string; the HTML to parse
- ``ncols`` - an integer; the number of word wrap columns
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.parse_html('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">\n<html><head></head><body>Test</body></html>', 80)
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0...Test</body>'
"""
def format(x):
return word_wrap(escape(x), ncols)
def format_html(x):
return self.process_cell_urls(x)
# If there is an error in the output, specially format it.
if not self.is_interactive_cell():
s = format_exception(format_html(s), ncols)
# Everything not wrapped in <html> ... </html> should be
# escaped and word wrapped.
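        # For instance (hypothetical input), given
        #   'x < y<html><b>bold</b></html>rest'
        # the 'x < y' part is escaped and word wrapped, '<b>bold</b>' is
        # passed through process_cell_urls() unescaped, and 'rest' is
        # handled the same way on the next pass.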
t = ''
while len(s) > 0:
i = s.find('<html>')
if i == -1:
t += format(s)
break
j = s.find('</html>')
if j == -1:
t += format(s[:i])
break
t += format(s[:i]) + format_html(s[i + 6:j])
s = s[j + 7:]
t = t.replace('</html>', '')
# Get rid of the <script> tags, since we do not want them to
# be evaluated twice. They are only evaluated in the wrapped
# version of the output.
if ncols == 0:
t = re_script.sub('', t)
# This is a temporary hack
#re_inline = re.compile('<script type="math/tex">(.*?)</script>')
#re_display = re.compile('<script type="math/tex; mode=display">(.*?)</script>')
#t = re_inline.sub('<span class="math">\1</span>', t)
#t = re_display.sub('<div class="math">\1</div>', t)
#t = t.replace('<script type="math/tex">(.*?)</script>', '<span class="math">\1</span>')
#t = t.replace('<script type="math/tex; mode=display">(.*?)</script>', '<div class="math">\1</div>')
####t = t.replace('<script type="math/tex">', '<span class="math">')
####t = t.replace('</script>', '</span>')
return t
def has_output(self):
"""
Returns whether this compute cell has any output.
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.has_output()
True
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '', None)
sage: C.has_output()
False
"""
return len(self._out.strip()) > 0
def is_html(self):
r"""
Returns whether this is an HTML compute cell, e.g., its system
is 'html'. This is typically specified by the percent
directive ``%html``.
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, "%html\nTest HTML", None, None)
sage: C.system()
u'html'
sage: C.is_html()
True
sage: C = sagenb.notebook.cell.Cell(0, "Test HTML", None, None)
sage: C.is_html()
False
"""
return self.system() == 'html'
#################
# Introspection #
#################
def set_introspect_html(self, html, completing=False, raw=False):
ur"""
Sets this compute cell's introspection text.
INPUT:
- ``html`` - a string; the updated text
- ``completing`` - a boolean (default: False); whether the
completions menu is open
- ``raw`` - a boolean (default: False)
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'sage?', '', W)
sage: C.introspect()
False
sage: C.evaluate(username='sage')
sage: W.check_comp(9999) # random output -- depends on computer speed
('d', Cell 0: in=sage?, out=)
sage: C.set_introspect_html('foobar')
sage: C.introspect_html()
u'foobar'
sage: C.set_introspect_html('`foobar`')
sage: C.introspect_html()
u'`foobar`'
sage: C.set_introspect_html('ěščřžýáíéďĎ')
sage: C.introspect_html()
u'\u011b\u0161\u010d\u0159\u017e\xfd\xe1\xed\xe9\u010f\u010e'
sage: W.quit()
sage: nb.delete()
"""
html = unicode_str(html)
self._introspect_html = html
def introspect_html(self):
"""
        Returns this compute cell's introspection text, setting it to
        '' if none is available.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'sage?', '', W)
sage: C.introspect()
False
sage: C.evaluate(username='sage')
sage: W.check_comp(9999) # random output -- depends on computer speed
('d', Cell 0: in=sage?, out=)
sage: C.introspect_html() # random output -- depends on computer speed
u'...<div class="docstring">...sage...</pre></div>...'
sage: W.quit()
sage: nb.delete()
"""
if not self.introspect():
return ''
try:
return self._introspect_html
except AttributeError:
self._introspect_html = u''
return u''
def introspect(self):
"""
        Returns this compute cell's introspection state.
        OUTPUT:
        - a pair of strings (the text before and after the cursor), or a
          boolean (False when not introspecting)
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'sage?', '', W)
sage: C.introspect()
False
sage: C.evaluate(username='sage')
sage: W.check_comp(9999) # random output -- depends on computer speed
('d', Cell 0: in=sage?, out=)
sage: C.introspect()
[u'sage?', '']
sage: W.quit()
sage: nb.delete()
"""
try:
return self._introspect
except AttributeError:
return False
def unset_introspect(self):
"""
Clears this compute cell's introspection text.
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'sage?', '', W)
sage: C.introspect()
False
sage: C.evaluate(username='sage')
sage: W.check_comp(9999) # random output -- depends on computer speed
('d', Cell 0: in=sage?, out=)
sage: C.introspect()
[u'sage?', '']
sage: C.unset_introspect()
sage: C.introspect()
False
sage: W.quit()
sage: nb.delete()
"""
self._introspect = False
def set_introspect(self, before_prompt, after_prompt):
"""
        Sets this compute cell's introspection text.
INPUT:
- ``before_prompt`` - a string
- ``after_prompt`` - a string
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.set_introspect("a", "b")
sage: C.introspect()
['a', 'b']
"""
self._introspect = [before_prompt, after_prompt]
def evaluate(self, introspect=False, time=None, username=None):
r"""
Evaluates this compute cell.
INPUT:
- ``introspect`` - a pair [``before_cursor``,
``after_cursor``] of strings (default: False)
- ``time`` - a boolean (default: None); whether to return the
time the computation takes
- ``username`` - a string (default: None); name of user doing
the evaluation
EXAMPLES:
We create a notebook, worksheet, and cell and evaluate it
in order to compute `3^5`::
sage: nb = sagenb.notebook.notebook.load_notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: W.edit_save('{{{\n3^5\n}}}')
sage: C = W.cell_list()[0]; C
Cell 0: in=3^5, out=
sage: C.evaluate(username='sage')
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0: in=3^5, out=
243
)
sage: C # random output -- depends on computer speed
Cell 0: in=3^5, out=
243
sage: W.quit()
sage: nb.delete()
"""
if introspect:
self.eval_method = 'introspect' # Run through TAB-introspection
else:
self.eval_method = 'eval' # Run through S-Enter, evaluate link, etc.
self._interrupted = False
self._evaluated = True
if time is not None:
self._time = time
self._introspect = introspect
self.worksheet().enqueue(self, username=username)
self._type = 'wrap'
dir = self.directory()
for D in os.listdir(dir):
F = os.path.join(dir, D)
try:
os.unlink(F)
except OSError:
try:
shutil.rmtree(F)
                except OSError:
                    pass
def version(self):
"""
Returns this compute cell's version number.
OUTPUT:
- an integer
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: initial_version=C.version() #random
sage: C.set_input_text('2+3')
sage: C.version()-initial_version
1
"""
try:
return self._version
except AttributeError:
# start with a random integer so that evaluations of the cell
# from different runs have different version numbers.
from sys import maxint
from random import randint
self._version = randint(0,maxint)
return self._version
def time(self):
r"""
Returns whether to print timing information about the
evaluation of this compute cell.
OUTPUT:
- a boolean
EXAMPLES::
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', None)
sage: C.time()
False
sage: C = sagenb.notebook.cell.Cell(0, '%time\n2+3', '5', None)
sage: C.time()
True
"""
return ('time' in self.percent_directives() or
'timeit' in self.percent_directives() or
getattr(self, '_time', False))
def html(self, wrap=None, div_wrap=True, do_print=False, publish=False):
r"""
Returns the HTML for this compute cell.
INPUT:
- ``wrap`` - an integer (default: None); the number of word
wrap columns
- ``div_wrap`` - a boolean (default: True); whether to wrap
the output in outer div elements
- ``do_print`` - a boolean (default: False); whether to return
output suitable for printing
- ``publish`` - a boolean (default: False); whether to render
a published cell
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.load_notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.html()
u'...cell_outer_0...2+3...5...'
"""
from template import template
if wrap is None:
wrap = self.notebook().conf()['word_wrap_cols']
return template(os.path.join('html', 'notebook', 'cell.html'),
cell=self, wrap=wrap, div_wrap=div_wrap,
do_print=do_print, publish=publish)
def url_to_self(self):
"""
Returns a notebook URL for this compute cell.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.url_to_self()
'/home/sage/0/cells/0'
"""
try:
return self._url_to_self
except AttributeError:
self._url_to_self = '/'+g.site_name+'/home/%s/cells/%s' % (
self.worksheet_filename(), self.id())
return self._url_to_self
def url_to_worksheet(self):
"""
        Returns a URL for this compute cell's worksheet.
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, '2+3', '5', W)
sage: C.url_to_worksheet()
'/home/sage/0'
"""
return '/'+g.site_name+'/home/{0}'.format(self.worksheet_filename())
def files(self):
"""
Returns a list of all the files in this compute cell's
directory.
OUTPUT:
- a list of strings
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0: in=plot(sin(x),0,5), out=
<html><font color='black'><img src='cell://sage0.png'></font></html>
<BLANKLINE>
)
sage: C.files() # random output -- depends on computer speed
['sage0.png']
sage: W.quit()
sage: nb.delete()
"""
dir = self.directory()
D = os.listdir(dir)
return D
def delete_files(self):
"""
Deletes all of the files associated with this compute cell.
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0: in=plot(sin(x),0,5), out=
<html><font color='black'><img src='cell://sage0.png'></font></html>
<BLANKLINE>
)
sage: C.files() # random output -- depends on computer speed
['sage0.png']
sage: C.delete_files()
sage: C.files()
[]
sage: W.quit()
sage: nb.delete()
"""
try:
dir = self._directory_name()
except AttributeError:
return
if os.path.exists(dir):
shutil.rmtree(dir, ignore_errors=True)
def _jmol_files_html(self, F):
"""
Helper for jmol files in :meth:`files_html`
"""
# If F ends in -size500.jmol then we make the viewer applet
# with size 500.
i = F.rfind('-size')
if i != -1:
size = F[i + 5:-5]
else:
size = 500
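        # e.g. 'sage0-size500.jmol' yields size '500', while a plain
        # 'sage0.jmol' falls back to the default of 500.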
# The ".jmol" script has defaultdirectory pointing
# to a zip file [see Graphics3d.show()]. But it is
# relative to the worksheet URL as seen in the browser.
# But that doesn't make sense for live help.
#
# So we need to prepend the worksheet URL, in order
# for the zip to be accessed correctly.
if self.worksheet().docbrowser():
jmol_name = os.path.join(self.directory(), F)
with open(jmol_name, 'r') as f:
jmol_script = f.read()
jmol_script = jmol_script.replace(
'defaultdirectory "',
'defaultdirectory "{0}/'.format(self.url_to_worksheet()))
with open(jmol_name, 'w') as f:
f.write(jmol_script)
image_name = os.path.join(self.url_to_self(),'.jmol_images',F)
script_name = os.path.join(self.url_to_self(), F)
return textwrap.dedent("""
<div id="sage_jmol_{id}" class="3DPlotDiv">
<div id="loadJmol" style="display:none;">{id}</div>
<div id="sage_jmol_size_{id}" style="display:none;">{size}</div>
<div id="sage_jmol_img_{id}" style="display:none;">{image_name}.png?{timestamp}</div>
<div id="sage_jmol_script_{id}" style="display:none;">{filename}?{timestamp}</div>
<div id="sage_jmol_server_url_{id}" style="display:none;">{callback}</div>
<div id="sage_jmol_status_{id}" style="display:none;">notActivated</div>
</div>
""").format(
id=self._id,
size=size,
image_name=image_name,
timestamp=time.time(),
filename=script_name,
callback=os.path.join(self.url_to_worksheet(), 'jsmol'),
)
def files_html(self, out):
"""
Returns HTML to display the files in this compute cell's
directory.
INPUT:
- ``out`` - a string; files to exclude. To exclude bar, foo,
..., use the format ``'cell://bar cell://foo ...'``
OUTPUT:
- a string
EXAMPLES::
sage: nb = sagenb.notebook.notebook.Notebook(tmp_dir(ext='.sagenb'))
sage: nb.user_manager().add_user('sage','sage','sage@sagemath.org',force=True)
sage: W = nb.create_new_worksheet('Test', 'sage')
sage: C = sagenb.notebook.cell.Cell(0, 'plot(sin(x),0,5)', '', W)
sage: C.evaluate()
sage: W.check_comp(wait=9999) # random output -- depends on computer speed
('d', Cell 0: in=plot(sin(x),0,5), out=
<html><font color='black'><img src='cell://sage0.png'></font></html>
<BLANKLINE>
)
sage: C.files_html('') # random output -- depends on computer speed
'<img src="/home/sage/0/cells/0/sage0.png?...">'
sage: W.quit()
sage: nb.delete()
"""
D = self.files()
D.sort()
if len(D) == 0:
return ''
images = []
files = []
        # Flags to allow processing of old worksheets that include Jmol.
        hasjmol = False
        jmoldatafile = ''
        hasjmolimages = False
        jmolimagebase = ''
from worksheet import CODE_PY
# The question mark trick here is so that images will be
# reloaded when the async request requests the output text for
# a computation. This is inspired by
# http://www.irt.org/script/416.htm/.
for F in D:
if os.path.split(F)[-1] == CODE_PY or 'cell://%s' % F in out:
continue
url = os.path.join(self.url_to_self(), F)
if (F.endswith('.png') or F.endswith('.bmp') or
F.endswith('.jpg') or F.endswith('.gif')):
images.append('<img src="%s?%d">' % (url, time.time()))
elif F.endswith('.obj'):
images.append("""<a href="javascript:sage3d_show('%s', '%s_%s', '%s');">Click for interactive view.</a>""" % (url, self._id, F, F[:-4]))
elif F.endswith('.mtl') or F.endswith(".objmeta"):
pass # obj data
elif F.endswith('.svg'):
images.append('<embed src="%s" type="image/svg+xml" name="emap">' % url)
elif F.endswith('.jmol'):
images.append(self._jmol_files_html(F))
jmolimagebase = F
                hasjmol = True
elif F.endswith('.jmol.zip'):
# jmol data
                jmoldatafile = os.path.join(self.directory(), F)
elif F.endswith('.canvas3d'):
                script = '<div><script>canvas3d.viewer("%s?%s");</script></div>' % (url, time.time())
images.append(script)
elif F.startswith('.jmol_'):
# static jmol data and images
                hasjmolimages = True
else:
link_text = str(F)
if len(link_text) > 40:
link_text = link_text[:10] + '...' + link_text[-20:]
files.append('<a target="_new" href="%s" class="file_link">%s</a>' % (url, link_text))
# TODO: remove this fugly in-place upgrading of worksheets
# and all the associated variables. If the worksheet is old
# just require a reevaluation.
        if hasjmol and not hasjmolimages:
            # This is probably an old worksheet. Generate the missing jmol
            # static image(s). Note: this is problematic in the notebook as
            # it uses tools from Sage to generate the images.
            head, tail = os.path.split(jmoldatafile)
            # The path in the launch script file needs to be fixed.
            worksheet, cellnum = os.path.split(head)
            path = "cells/%s/%s" % (cellnum, tail)
            with open(os.path.join(head, jmolimagebase), 'w') as f:
                f.write('set defaultdirectory "%s"\n' % path)
                f.write('script SCRIPT\n')
            # Name the image file.
            png_path = os.path.realpath(os.path.join(head, '.jmol_images'))
            if not os.path.exists(png_path):
                os.mkdir(png_path)
            png_name = os.path.join(png_path, jmolimagebase)
            # Test whether a Java VM is available.
            from sage.interfaces.jmoldata import JmolData
            jdata = JmolData()
            if jdata.is_jvm_available():
                # Make the image with Jmol.
                png_fullpath = png_name + ".png"
                script = 'set defaultdirectory "%s"\n script SCRIPT\n' % jmoldatafile
                jdata.export_image(targetfile=png_fullpath, datafile=script,
                                   image_type="PNG", figsize=4)
            else:
                images.append('Java Virtual Machine unavailable. Cannot make '
                              'an image from old data. Please reevaluate the cell.')
        if len(images) == 0:
            images = ''
        else:
            images = '<br>'.join(images)
        if len(files) == 0:
            files = ''
        else:
            files = (' ' * 3).join(files)
files = unicode_str(files)
images = unicode_str(images)
return images + files
# Alias
ComputeCell = Cell
#####################
# Utility functions #
#####################
def format_exception(s0, ncols):
r"""
Formats exceptions so they do not appear expanded by default.
INPUT:
- ``s0`` - a string
- ``ncols`` - an integer; number of word wrap columns
OUTPUT:
- a string
    If ``s0`` contains "notracebacks", this function simply returns
    ``s0``.
EXAMPLES::
sage: sagenb.notebook.cell.format_exception(sagenb.notebook.cell.TRACEBACK,80)
'\nTraceback (click to the left of this block for traceback)\n...\nTraceback (most recent call last):'
sage: sagenb.notebook.cell.format_exception(sagenb.notebook.cell.TRACEBACK + "notracebacks",80)
'Traceback (most recent call last):notracebacks'
"""
s = s0.lstrip()
# Add a notracebacks option -- if it is in the string then
# tracebacks aren't shrunk. This is currently used by the
# functions sagenb.misc.support.help and sage.server.support.help.
if TRACEBACK not in s or 'notracebacks' in s:
return s0
if ncols > 0:
s = s.strip()
w = s.splitlines()
for k in range(len(w)):
if TRACEBACK in w[k]:
break
s = ('\n'.join(w[:k]) +
'\nTraceback (click to the left of this block for traceback)' +
'\n...\n' + w[-1])
else:
s = s.replace("exec compile(ur'", "")
s = s.replace("' + '\\n', '', 'single')", "")
return s
def number_of_rows(txt, ncols):
r"""
Returns the number of rows needed to display a string, given a
maximum number of columns per row.
INPUT:
- ``txt`` - a string; the text to "wrap"
- ``ncols`` - an integer; the number of word wrap columns
OUTPUT:
- an integer
EXAMPLES::
sage: from sagenb.notebook.cell import number_of_rows
sage: s = "asdfasdf\nasdfasdf\n"
sage: number_of_rows(s, 8)
2
sage: number_of_rows(s, 5)
4
sage: number_of_rows(s, 4)
4
"""
rows = txt.splitlines()
nrows = len(rows)
for i in range(nrows):
nrows += int((len(rows[i]) - 1) / ncols)
return nrows
|
gpl-3.0
| -4,787,506,408,650,216,000
| 30.904152
| 153
| 0.513361
| false
| 3.834927
| false
| false
| false
|
debalance/hp
|
hp/core/urls.py
|
1
|
1430
|
# -*- coding: utf-8 -*-
#
# This file is part of the jabber.at homepage (https://github.com/jabber-at/hp).
#
# This project is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-xmpp-account.
# If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url
from django.contrib.sitemaps.views import sitemap
from django.utils.translation import ugettext_lazy as _
from blog.sitemaps import BlogPostSitemap
from blog.sitemaps import PageSitemap
from . import views
from .sitemaps import StaticSitemap
sitemaps = {
'blog': BlogPostSitemap,
'page': PageSitemap,
'static': StaticSitemap,
}
app_name = 'core'
urlpatterns = [
url(_(r'^contact/$'), views.ContactView.as_view(), name='contact'),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap'),
url(r'^api/set-lang/$', views.SetLanguageView.as_view(), name='api-set-lang'),
]
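# Illustrative usage (not part of this module): with app_name = 'core',
# reverse('core:contact') resolves to the translated contact URL, e.g.
# '/contact/' when English is the active language.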
|
gpl-3.0
| -8,019,578,557,664,934,000
| 35.666667
| 99
| 0.732867
| false
| 3.753281
| false
| false
| false
|
adazey/Muzez
|
libs/nltk/corpus/reader/wordnet.py
|
1
|
78713
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: WordNet
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Steven Bethard <Steven.Bethard@colorado.edu>
# Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# Nitin Madnani <nmadnani@ets.org>
# Nasruddin A’aidil Shari
# Sim Wei Ying Geraldine
# Soe Lynn
# Francis Bond <bond@ieee.org>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
An NLTK interface for WordNet
WordNet is a lexical database of English.
Using synsets, it helps find conceptual relationships between words,
such as hypernyms, hyponyms, synonyms, and antonyms.
For details about WordNet see:
http://wordnet.princeton.edu/
This module also allows you to find lemmas in languages
other than English from the Open Multilingual Wordnet
http://compling.hss.ntu.edu.sg/omw/
"""
from __future__ import print_function, unicode_literals
import math
import re
from itertools import islice, chain
from operator import itemgetter, attrgetter
from collections import defaultdict, deque
from nltk.corpus.reader import CorpusReader
from nltk.util import binary_search_file as _binary_search_file
from nltk.probability import FreqDist
from nltk.compat import (iteritems, python_2_unicode_compatible,
total_ordering, xrange)
######################################################################
## Table of Contents
######################################################################
## - Constants
## - Data Classes
## - WordNetError
## - Lemma
## - Synset
## - WordNet Corpus Reader
## - WordNet Information Content Corpus Reader
## - Similarity Metrics
## - Demo
######################################################################
## Constants
######################################################################
#: Positive infinity (for similarity functions)
_INF = 1e300
#{ Part-of-speech constants
ADJ, ADJ_SAT, ADV, NOUN, VERB = 'a', 's', 'r', 'n', 'v'
#}
POS_LIST = [NOUN, VERB, ADJ, ADV]
#: A table of strings that are used to express verb frames.
VERB_FRAME_STRINGS = (
None,
"Something %s",
"Somebody %s",
"It is %sing",
"Something is %sing PP",
"Something %s something Adjective/Noun",
"Something %s Adjective/Noun",
"Somebody %s Adjective",
"Somebody %s something",
"Somebody %s somebody",
"Something %s somebody",
"Something %s something",
"Something %s to somebody",
"Somebody %s on something",
"Somebody %s somebody something",
"Somebody %s something to somebody",
"Somebody %s something from somebody",
"Somebody %s somebody with something",
"Somebody %s somebody of something",
"Somebody %s something on somebody",
"Somebody %s somebody PP",
"Somebody %s something PP",
"Somebody %s PP",
"Somebody's (body part) %s",
"Somebody %s somebody to INFINITIVE",
"Somebody %s somebody INFINITIVE",
"Somebody %s that CLAUSE",
"Somebody %s to somebody",
"Somebody %s to INFINITIVE",
"Somebody %s whether INFINITIVE",
"Somebody %s somebody into V-ing something",
"Somebody %s something with something",
"Somebody %s INFINITIVE",
"Somebody %s VERB-ing",
"It %s that CLAUSE",
"Something %s INFINITIVE")
SENSENUM_RE = re.compile(r'\.\d\d\.')
######################################################################
## Data Classes
######################################################################
class WordNetError(Exception):
"""An exception class for wordnet-related errors."""
@total_ordering
class _WordNetObject(object):
"""A common base class for lemmas and synsets."""
def hypernyms(self):
return self._related('@')
def _hypernyms(self):
return self._related('@', sort=False)
def instance_hypernyms(self):
return self._related('@i')
def _instance_hypernyms(self):
return self._related('@i', sort=False)
def hyponyms(self):
return self._related('~')
def instance_hyponyms(self):
return self._related('~i')
def member_holonyms(self):
return self._related('#m')
def substance_holonyms(self):
return self._related('#s')
def part_holonyms(self):
return self._related('#p')
def member_meronyms(self):
return self._related('%m')
def substance_meronyms(self):
return self._related('%s')
def part_meronyms(self):
return self._related('%p')
def topic_domains(self):
return self._related(';c')
def region_domains(self):
return self._related(';r')
def usage_domains(self):
return self._related(';u')
def attributes(self):
return self._related('=')
def entailments(self):
return self._related('*')
def causes(self):
return self._related('>')
def also_sees(self):
return self._related('^')
def verb_groups(self):
return self._related('$')
def similar_tos(self):
return self._related('&')
def __hash__(self):
return hash(self._name)
def __eq__(self, other):
return self._name == other._name
def __ne__(self, other):
return self._name != other._name
def __lt__(self, other):
return self._name < other._name
@python_2_unicode_compatible
class Lemma(_WordNetObject):
"""
The lexical entry for a single morphological form of a
sense-disambiguated word.
Create a Lemma from a "<word>.<pos>.<number>.<lemma>" string where:
<word> is the morphological stem identifying the synset
<pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
<number> is the sense number, counting from 0.
<lemma> is the morphological form of interest
Note that <word> and <lemma> can be different, e.g. the Synset
'salt.n.03' has the Lemmas 'salt.n.03.salt', 'salt.n.03.saltiness' and
'salt.n.03.salinity'.
Lemma attributes, accessible via methods with the same name::
- name: The canonical name of this lemma.
- synset: The synset that this lemma belongs to.
- syntactic_marker: For adjectives, the WordNet string identifying the
syntactic position relative modified noun. See:
http://wordnet.princeton.edu/man/wninput.5WN.html#sect10
For all other parts of speech, this attribute is None.
- count: The frequency of this lemma in wordnet.
Lemma methods:
Lemmas have the following methods for retrieving related Lemmas. They
correspond to the names for the pointer symbols defined here:
http://wordnet.princeton.edu/man/wninput.5WN.html#sect3
These methods all return lists of Lemmas:
- antonyms
- hypernyms, instance_hypernyms
- hyponyms, instance_hyponyms
- member_holonyms, substance_holonyms, part_holonyms
- member_meronyms, substance_meronyms, part_meronyms
- topic_domains, region_domains, usage_domains
- attributes
- derivationally_related_forms
- entailments
- causes
- also_sees
- verb_groups
- similar_tos
- pertainyms
"""
__slots__ = ['_wordnet_corpus_reader', '_name', '_syntactic_marker',
'_synset', '_frame_strings', '_frame_ids',
'_lexname_index', '_lex_id', '_lang', '_key']
def __init__(self, wordnet_corpus_reader, synset, name,
lexname_index, lex_id, syntactic_marker):
self._wordnet_corpus_reader = wordnet_corpus_reader
self._name = name
self._syntactic_marker = syntactic_marker
self._synset = synset
self._frame_strings = []
self._frame_ids = []
self._lexname_index = lexname_index
self._lex_id = lex_id
self._lang = 'eng'
self._key = None # gets set later.
def name(self):
return self._name
def syntactic_marker(self):
return self._syntactic_marker
def synset(self):
return self._synset
def frame_strings(self):
return self._frame_strings
def frame_ids(self):
return self._frame_ids
def lang(self):
return self._lang
def key(self):
return self._key
def __repr__(self):
tup = type(self).__name__, self._synset._name, self._name
return "%s('%s.%s')" % tup
def _related(self, relation_symbol):
get_synset = self._wordnet_corpus_reader._synset_from_pos_and_offset
return sorted([get_synset(pos, offset)._lemmas[lemma_index]
for pos, offset, lemma_index
in self._synset._lemma_pointers[self._name, relation_symbol]])
def count(self):
"""Return the frequency count for this Lemma"""
return self._wordnet_corpus_reader.lemma_count(self)
def antonyms(self):
return self._related('!')
def derivationally_related_forms(self):
return self._related('+')
def pertainyms(self):
return self._related('\\')
@python_2_unicode_compatible
class Synset(_WordNetObject):
"""Create a Synset from a "<lemma>.<pos>.<number>" string where:
<lemma> is the word's morphological stem
<pos> is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB
<number> is the sense number, counting from 0.
Synset attributes, accessible via methods with the same name:
- name: The canonical name of this synset, formed using the first lemma
of this synset. Note that this may be different from the name
passed to the constructor if that string used a different lemma to
identify the synset.
- pos: The synset's part of speech, matching one of the module level
attributes ADJ, ADJ_SAT, ADV, NOUN or VERB.
- lemmas: A list of the Lemma objects for this synset.
- definition: The definition for this synset.
- examples: A list of example strings for this synset.
- offset: The offset in the WordNet dict file of this synset.
- lexname: The name of the lexicographer file containing this synset.
Synset methods:
Synsets have the following methods for retrieving related Synsets.
They correspond to the names for the pointer symbols defined here:
http://wordnet.princeton.edu/man/wninput.5WN.html#sect3
These methods all return lists of Synsets.
- hypernyms, instance_hypernyms
- hyponyms, instance_hyponyms
- member_holonyms, substance_holonyms, part_holonyms
- member_meronyms, substance_meronyms, part_meronyms
- attributes
- entailments
- causes
- also_sees
- verb_groups
- similar_tos
Additionally, Synsets support the following methods specific to the
hypernym relation:
- root_hypernyms
- common_hypernyms
- lowest_common_hypernyms
Note that Synsets do not support the following relations because
these are defined by WordNet as lexical relations:
- antonyms
- derivationally_related_forms
- pertainyms
"""
__slots__ = ['_pos', '_offset', '_name', '_frame_ids',
'_lemmas', '_lemma_names',
'_definition', '_examples', '_lexname',
'_pointers', '_lemma_pointers', '_max_depth',
'_min_depth']
def __init__(self, wordnet_corpus_reader):
self._wordnet_corpus_reader = wordnet_corpus_reader
# All of these attributes get initialized by
# WordNetCorpusReader._synset_from_pos_and_line()
self._pos = None
self._offset = None
self._name = None
self._frame_ids = []
self._lemmas = []
self._lemma_names = []
self._definition = None
self._examples = []
self._lexname = None # lexicographer name
self._all_hypernyms = None
self._pointers = defaultdict(set)
self._lemma_pointers = defaultdict(set)
def pos(self):
return self._pos
def offset(self):
return self._offset
def name(self):
return self._name
def frame_ids(self):
return self._frame_ids
def definition(self):
return self._definition
def examples(self):
return self._examples
def lexname(self):
return self._lexname
def _needs_root(self):
        if self._pos == NOUN:
            return self._wordnet_corpus_reader.get_version() == '1.6'
        elif self._pos == VERB:
            return True
def lemma_names(self, lang='eng'):
'''Return all the lemma_names associated with the synset'''
        if lang == 'eng':
return self._lemma_names
else:
self._wordnet_corpus_reader._load_lang_data(lang)
i = self._wordnet_corpus_reader.ss2of(self)
if i in self._wordnet_corpus_reader._lang_data[lang][0]:
return self._wordnet_corpus_reader._lang_data[lang][0][i]
else:
return []
def lemmas(self, lang='eng'):
'''Return all the lemma objects associated with the synset'''
        if lang == 'eng':
return self._lemmas
else:
self._wordnet_corpus_reader._load_lang_data(lang)
lemmark = []
lemmy = self.lemma_names(lang)
for lem in lemmy:
                temp = Lemma(self._wordnet_corpus_reader, self, lem,
                             self._wordnet_corpus_reader._lexnames.index(self.lexname()),
                             0, None)
                temp._lang = lang
lemmark.append(temp)
return lemmark
def root_hypernyms(self):
"""Get the topmost hypernyms of this synset in WordNet."""
result = []
seen = set()
todo = [self]
while todo:
next_synset = todo.pop()
if next_synset not in seen:
seen.add(next_synset)
next_hypernyms = next_synset.hypernyms() + \
next_synset.instance_hypernyms()
if not next_hypernyms:
result.append(next_synset)
else:
todo.extend(next_hypernyms)
return result
# Simpler implementation which makes incorrect assumption that
# hypernym hierarchy is acyclic:
#
# if not self.hypernyms():
# return [self]
# else:
# return list(set(root for h in self.hypernyms()
# for root in h.root_hypernyms()))
def max_depth(self):
"""
:return: The length of the longest hypernym path from this
synset to the root.
"""
if "_max_depth" not in self.__dict__:
hypernyms = self.hypernyms() + self.instance_hypernyms()
if not hypernyms:
self._max_depth = 0
else:
self._max_depth = 1 + max(h.max_depth() for h in hypernyms)
return self._max_depth
def min_depth(self):
"""
:return: The length of the shortest hypernym path from this
synset to the root.
"""
if "_min_depth" not in self.__dict__:
hypernyms = self.hypernyms() + self.instance_hypernyms()
if not hypernyms:
self._min_depth = 0
else:
self._min_depth = 1 + min(h.min_depth() for h in hypernyms)
return self._min_depth
def closure(self, rel, depth=-1):
"""Return the transitive closure of source under the rel
relationship, breadth-first
>>> from nltk.corpus import wordnet as wn
>>> dog = wn.synset('dog.n.01')
>>> hyp = lambda s:s.hypernyms()
>>> list(dog.closure(hyp))
[Synset('canine.n.02'), Synset('domestic_animal.n.01'),
Synset('carnivore.n.01'), Synset('animal.n.01'),
Synset('placental.n.01'), Synset('organism.n.01'),
Synset('mammal.n.01'), Synset('living_thing.n.01'),
Synset('vertebrate.n.01'), Synset('whole.n.02'),
Synset('chordate.n.01'), Synset('object.n.01'),
Synset('physical_entity.n.01'), Synset('entity.n.01')]
"""
from nltk.util import breadth_first
synset_offsets = []
for synset in breadth_first(self, rel, depth):
if synset._offset != self._offset:
if synset._offset not in synset_offsets:
synset_offsets.append(synset._offset)
yield synset
def hypernym_paths(self):
"""
Get the path(s) from this synset to the root, where each path is a
list of the synset nodes traversed on the way to the root.
:return: A list of lists, where each list gives the node sequence
connecting the initial ``Synset`` node and a root node.
"""
paths = []
hypernyms = self.hypernyms() + self.instance_hypernyms()
if len(hypernyms) == 0:
paths = [[self]]
for hypernym in hypernyms:
for ancestor_list in hypernym.hypernym_paths():
ancestor_list.append(self)
paths.append(ancestor_list)
return paths
def common_hypernyms(self, other):
"""
Find all synsets that are hypernyms of this synset and the
other synset.
:type other: Synset
:param other: other input synset.
:return: The synsets that are hypernyms of both synsets.
"""
if not self._all_hypernyms:
self._all_hypernyms = set(self_synset
for self_synsets in self._iter_hypernym_lists()
for self_synset in self_synsets)
if not other._all_hypernyms:
other._all_hypernyms = set(other_synset
for other_synsets in other._iter_hypernym_lists()
for other_synset in other_synsets)
return list(self._all_hypernyms.intersection(other._all_hypernyms))
def lowest_common_hypernyms(self, other, simulate_root=False, use_min_depth=False):
"""
Get a list of lowest synset(s) that both synsets have as a hypernym.
        When `use_min_depth == False` this means that the synset which appears
        as a hypernym of both `self` and `other` with the lowest maximum depth
        is returned, or if there are multiple such synsets at the same depth,
        they are all returned.
        However, if `use_min_depth == True` then the synset(s) which has/have
        the lowest minimum depth and appear(s) in both paths is/are returned.
By setting the use_min_depth flag to True, the behavior of NLTK2 can be preserved.
This was changed in NLTK3 to give more accurate results in a small set of cases,
generally with synsets concerning people. (eg: 'chef.n.01', 'fireman.n.01', etc.)
This method is an implementation of Ted Pedersen's "Lowest Common Subsumer" method
from the Perl Wordnet module. It can return either "self" or "other" if they are a
hypernym of the other.
:type other: Synset
:param other: other input synset
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (False by default)
creates a fake root that connects all the taxonomies. Set it
to True to enable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will need to be added
for nouns as well.
:type use_min_depth: bool
:param use_min_depth: This setting mimics older (v2) behavior of NLTK wordnet
If True, will use the min_depth function to calculate the lowest common
hypernyms. This is known to give strange results for some synset pairs
(eg: 'chef.n.01', 'fireman.n.01') but is retained for backwards compatibility
:return: The synsets that are the lowest common hypernyms of both synsets
"""
synsets = self.common_hypernyms(other)
if simulate_root:
fake_synset = Synset(None)
fake_synset._name = '*ROOT*'
fake_synset.hypernyms = lambda: []
fake_synset.instance_hypernyms = lambda: []
synsets.append(fake_synset)
try:
if use_min_depth:
max_depth = max(s.min_depth() for s in synsets)
unsorted_lch = [s for s in synsets if s.min_depth() == max_depth]
else:
max_depth = max(s.max_depth() for s in synsets)
unsorted_lch = [s for s in synsets if s.max_depth() == max_depth]
return sorted(unsorted_lch)
except ValueError:
return []
def hypernym_distances(self, distance=0, simulate_root=False):
"""
Get the path(s) from this synset to the root, counting the distance
of each node from the initial node on the way. A set of
(synset, distance) tuples is returned.
:type distance: int
:param distance: the distance (number of edges) from this hypernym to
the original hypernym ``Synset`` on which this method was called.
:return: A set of ``(Synset, int)`` tuples where each ``Synset`` is
a hypernym of the first ``Synset``.
"""
distances = set([(self, distance)])
for hypernym in self._hypernyms() + self._instance_hypernyms():
distances |= hypernym.hypernym_distances(distance+1, simulate_root=False)
if simulate_root:
fake_synset = Synset(None)
fake_synset._name = '*ROOT*'
fake_synset_distance = max(distances, key=itemgetter(1))[1]
distances.add((fake_synset, fake_synset_distance+1))
return distances
def _shortest_hypernym_paths(self, simulate_root):
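        # Breadth-first search upward through the (instance) hypernym graph,
        # recording for each ancestor the minimum number of edges needed to
        # reach it from this synset.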
if self._name == '*ROOT*':
return {self: 0}
queue = deque([(self, 0)])
path = {}
while queue:
s, depth = queue.popleft()
if s in path:
continue
path[s] = depth
depth += 1
queue.extend((hyp, depth) for hyp in s._hypernyms())
queue.extend((hyp, depth) for hyp in s._instance_hypernyms())
if simulate_root:
fake_synset = Synset(None)
fake_synset._name = '*ROOT*'
path[fake_synset] = max(path.values()) + 1
return path
def shortest_path_distance(self, other, simulate_root=False):
"""
Returns the distance of the shortest path linking the two synsets (if
one exists). For each synset, all the ancestor nodes and their
distances are recorded and compared. The ancestor node common to both
synsets that can be reached with the minimum number of traversals is
used. If no ancestor nodes are common, None is returned. If a node is
compared with itself 0 is returned.
:type other: Synset
:param other: The Synset to which the shortest path will be found.
:return: The number of edges in the shortest path connecting the two
nodes, or None if no path exists.
"""
if self == other:
return 0
dist_dict1 = self._shortest_hypernym_paths(simulate_root)
dist_dict2 = other._shortest_hypernym_paths(simulate_root)
# For each ancestor synset common to both subject synsets, find the
# connecting path length. Return the shortest of these.
inf = float('inf')
path_distance = inf
for synset, d1 in iteritems(dist_dict1):
d2 = dist_dict2.get(synset, inf)
path_distance = min(path_distance, d1 + d2)
return None if math.isinf(path_distance) else path_distance
def tree(self, rel, depth=-1, cut_mark=None):
"""
>>> from nltk.corpus import wordnet as wn
>>> dog = wn.synset('dog.n.01')
>>> hyp = lambda s:s.hypernyms()
>>> from pprint import pprint
>>> pprint(dog.tree(hyp))
[Synset('dog.n.01'),
[Synset('canine.n.02'),
[Synset('carnivore.n.01'),
[Synset('placental.n.01'),
[Synset('mammal.n.01'),
[Synset('vertebrate.n.01'),
[Synset('chordate.n.01'),
[Synset('animal.n.01'),
[Synset('organism.n.01'),
[Synset('living_thing.n.01'),
[Synset('whole.n.02'),
[Synset('object.n.01'),
[Synset('physical_entity.n.01'),
[Synset('entity.n.01')]]]]]]]]]]]]],
[Synset('domestic_animal.n.01'),
[Synset('animal.n.01'),
[Synset('organism.n.01'),
[Synset('living_thing.n.01'),
[Synset('whole.n.02'),
[Synset('object.n.01'),
[Synset('physical_entity.n.01'), [Synset('entity.n.01')]]]]]]]]]
"""
tree = [self]
if depth != 0:
tree += [x.tree(rel, depth-1, cut_mark) for x in rel(self)]
elif cut_mark:
tree += [cut_mark]
return tree
# interface to similarity methods
def path_similarity(self, other, verbose=False, simulate_root=True):
"""
Path Distance Similarity:
Return a score denoting how similar two word senses are, based on the
        shortest path that connects the senses in the is-a (hypernym/hyponym)
taxonomy. The score is in the range 0 to 1, except in those cases where
a path cannot be found (will only be true for verbs as there are many
distinct verb taxonomies), in which case None is returned. A score of
1 represents identity i.e. comparing a sense with itself will return 1.
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (True by default)
creates a fake root that connects all the taxonomies. Set it
to false to disable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will be added for nouns
as well.
:return: A score denoting the similarity of the two ``Synset`` objects,
normally between 0 and 1. None is returned if no connecting path
could be found. 1 is returned if a ``Synset`` is compared with
itself.
"""
distance = self.shortest_path_distance(other, simulate_root=simulate_root and self._needs_root())
if distance is None or distance < 0:
return None
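        # Worked example (WordNet 3.0 values; other versions may differ):
        # dog.n.01 and cat.n.01 are 4 edges apart in the noun taxonomy,
        # so the score is 1.0 / (4 + 1) = 0.2.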
return 1.0 / (distance + 1)
def lch_similarity(self, other, verbose=False, simulate_root=True):
"""
Leacock Chodorow Similarity:
Return a score denoting how similar two word senses are, based on the
shortest path that connects the senses (as above) and the maximum depth
of the taxonomy in which the senses occur. The relationship is given as
-log(p/2d) where p is the shortest path length and d is the taxonomy
depth.
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (True by default)
creates a fake root that connects all the taxonomies. Set it
to false to disable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will be added for nouns
as well.
:return: A score denoting the similarity of the two ``Synset`` objects,
normally greater than 0. None is returned if no connecting path
could be found. If a ``Synset`` is compared with itself, the
maximum score is returned, which varies depending on the taxonomy
depth.
"""
if self._pos != other._pos:
raise WordNetError('Computing the lch similarity requires ' + \
'%s and %s to have the same part of speech.' % \
(self, other))
need_root = self._needs_root()
if self._pos not in self._wordnet_corpus_reader._max_depth:
self._wordnet_corpus_reader._compute_max_depth(self._pos, need_root)
depth = self._wordnet_corpus_reader._max_depth[self._pos]
distance = self.shortest_path_distance(other, simulate_root=simulate_root and need_root)
if distance is None or distance < 0 or depth == 0:
return None
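        # Worked example (WordNet 3.0 values; other versions may differ):
        # for dog.n.01 and cat.n.01 the shortest path length is 4 and the
        # noun taxonomy depth is 19, giving -log(5 / 38), roughly 2.028.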
return -math.log((distance + 1) / (2.0 * depth))
def wup_similarity(self, other, verbose=False, simulate_root=True):
"""
Wu-Palmer Similarity:
Return a score denoting how similar two word senses are, based on the
depth of the two senses in the taxonomy and that of their Least Common
Subsumer (most specific ancestor node). Previously, the scores computed
by this implementation did _not_ always agree with those given by
Pedersen's Perl implementation of WordNet Similarity. However, with
the addition of the simulate_root flag (see below), the score for
        verbs now almost always agrees, but not always for nouns.
The LCS does not necessarily feature in the shortest path connecting
the two senses, as it is by definition the common ancestor deepest in
the taxonomy, not closest to the two senses. Typically, however, it
will so feature. Where multiple candidates for the LCS exist, that
whose shortest path to the root node is the longest will be selected.
Where the LCS has multiple paths to the root, the longer path is used
for the purposes of the calculation.
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type simulate_root: bool
:param simulate_root: The various verb taxonomies do not
share a single root which disallows this metric from working for
synsets that are not connected. This flag (True by default)
creates a fake root that connects all the taxonomies. Set it
to false to disable this behavior. For the noun taxonomy,
there is usually a default root except for WordNet version 1.6.
If you are using wordnet 1.6, a fake root will be added for nouns
as well.
:return: A float score denoting the similarity of the two ``Synset`` objects,
normally greater than zero. If no connecting path between the two
senses can be found, None is returned.
"""
need_root = self._needs_root()
# Note that to preserve behavior from NLTK2 we set use_min_depth=True
# It is possible that more accurate results could be obtained by
# removing this setting and it should be tested later on
subsumers = self.lowest_common_hypernyms(other, simulate_root=simulate_root and need_root, use_min_depth=True)
# If no LCS was found return None
if len(subsumers) == 0:
return None
subsumer = subsumers[0]
# Get the longest path from the LCS to the root,
# including a correction:
# - add one because the calculations include both the start and end
# nodes
depth = subsumer.max_depth() + 1
# Note: No need for an additional add-one correction for non-nouns
# to account for an imaginary root node because that is now automatically
# handled by simulate_root
# if subsumer._pos != NOUN:
# depth += 1
# Get the shortest path from the LCS to each of the synsets it is
# subsuming. Add this to the LCS path length to get the path
# length from each synset to the root.
len1 = self.shortest_path_distance(subsumer, simulate_root=simulate_root and need_root)
len2 = other.shortest_path_distance(subsumer, simulate_root=simulate_root and need_root)
if len1 is None or len2 is None:
return None
len1 += depth
len2 += depth
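        # Worked example (WordNet 3.0 values; other versions may differ):
        # the LCS of dog.n.01 and cat.n.01 is carnivore.n.01 with depth 12;
        # each sense lies 2 edges below it, so the score is
        # 2.0 * 12 / (14 + 14) = 6/7, roughly 0.857.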
return (2.0 * depth) / (len1 + len2)
def res_similarity(self, other, ic, verbose=False):
"""
Resnik Similarity:
Return a score denoting how similar two word senses are, based on the
Information Content (IC) of the Least Common Subsumer (most specific
ancestor node).
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type ic: dict
:param ic: an information content object (as returned by ``nltk.corpus.wordnet_ic.ic()``).
:return: A float score denoting the similarity of the two ``Synset`` objects.
Synsets whose LCS is the root node of the taxonomy will have a
score of 0 (e.g. N['dog'][0] and N['table'][0]).
"""
ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
return lcs_ic
def jcn_similarity(self, other, ic, verbose=False):
"""
Jiang-Conrath Similarity:
Return a score denoting how similar two word senses are, based on the
Information Content (IC) of the Least Common Subsumer (most specific
ancestor node) and that of the two input Synsets. The relationship is
given by the equation 1 / (IC(s1) + IC(s2) - 2 * IC(lcs)).
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type ic: dict
:param ic: an information content object (as returned by ``nltk.corpus.wordnet_ic.ic()``).
:return: A float score denoting the similarity of the two ``Synset`` objects.
"""
if self == other:
return _INF
ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
# If either of the input synsets are the root synset, or have a
# frequency of 0 (sparse data problem), return 0.
if ic1 == 0 or ic2 == 0:
return 0
ic_difference = ic1 + ic2 - 2 * lcs_ic
if ic_difference == 0:
return _INF
return 1 / ic_difference
def lin_similarity(self, other, ic, verbose=False):
"""
Lin Similarity:
Return a score denoting how similar two word senses are, based on the
Information Content (IC) of the Least Common Subsumer (most specific
ancestor node) and that of the two input Synsets. The relationship is
given by the equation 2 * IC(lcs) / (IC(s1) + IC(s2)).
:type other: Synset
:param other: The ``Synset`` that this ``Synset`` is being compared to.
:type ic: dict
:param ic: an information content object (as returned by ``nltk.corpus.wordnet_ic.ic()``).
:return: A float score denoting the similarity of the two ``Synset`` objects,
in the range 0 to 1.
"""
ic1, ic2, lcs_ic = _lcs_ic(self, other, ic)
return (2.0 * lcs_ic) / (ic1 + ic2)
def _iter_hypernym_lists(self):
"""
:return: An iterator over ``Synset`` objects that are either proper
hypernyms or instance of hypernyms of the synset.
"""
todo = [self]
seen = set()
while todo:
for synset in todo:
seen.add(synset)
yield todo
todo = [hypernym
for synset in todo
for hypernym in (synset.hypernyms() +
synset.instance_hypernyms())
if hypernym not in seen]
def __repr__(self):
return "%s('%s')" % (type(self).__name__, self._name)
def _related(self, relation_symbol, sort=True):
get_synset = self._wordnet_corpus_reader._synset_from_pos_and_offset
pointer_tuples = self._pointers[relation_symbol]
r = [get_synset(pos, offset) for pos, offset in pointer_tuples]
if sort:
r.sort()
return r
######################################################################
## WordNet Corpus Reader
######################################################################
class WordNetCorpusReader(CorpusReader):
"""
A corpus reader used to access wordnet or its variants.
"""
_ENCODING = 'utf8'
#{ Part-of-speech constants
ADJ, ADJ_SAT, ADV, NOUN, VERB = 'a', 's', 'r', 'n', 'v'
#}
#{ Filename constants
_FILEMAP = {ADJ: 'adj', ADV: 'adv', NOUN: 'noun', VERB: 'verb'}
#}
#{ Part of speech constants
_pos_numbers = {NOUN: 1, VERB: 2, ADJ: 3, ADV: 4, ADJ_SAT: 5}
_pos_names = dict(tup[::-1] for tup in _pos_numbers.items())
#}
#: A list of file identifiers for all the fileids used by this
#: corpus reader.
_FILES = ('cntlist.rev', 'lexnames', 'index.sense',
'index.adj', 'index.adv', 'index.noun', 'index.verb',
'data.adj', 'data.adv', 'data.noun', 'data.verb',
'adj.exc', 'adv.exc', 'noun.exc', 'verb.exc', )
def __init__(self, root, omw_reader):
"""
Construct a new wordnet corpus reader, with the given root
directory.
"""
super(WordNetCorpusReader, self).__init__(root, self._FILES,
encoding=self._ENCODING)
        # An index that provides the file offset
# Map from lemma -> pos -> synset_index -> offset
self._lemma_pos_offset_map = defaultdict(dict)
        # A cache so we don't have to reconstruct synsets
# Map from pos -> offset -> synset
self._synset_offset_cache = defaultdict(dict)
# A lookup for the maximum depth of each part of speech. Useful for
# the lch similarity metric.
self._max_depth = defaultdict(dict)
# Corpus reader containing omw data.
self._omw_reader = omw_reader
# A cache to store the wordnet data of multiple languages
self._lang_data = defaultdict(list)
self._data_file_map = {}
self._exception_map = {}
self._lexnames = []
self._key_count_file = None
self._key_synset_file = None
# Load the lexnames
for i, line in enumerate(self.open('lexnames')):
index, lexname, _ = line.split()
assert int(index) == i
self._lexnames.append(lexname)
# Load the indices for lemmas and synset offsets
self._load_lemma_pos_offset_map()
# load the exception file data into memory
self._load_exception_map()
# Open Multilingual WordNet functions, contributed by
# Nasruddin A’aidil Shari, Sim Wei Ying Geraldine, and Soe Lynn
def of2ss(self, of):
        ''' take an id and return the corresponding synset '''
return self._synset_from_pos_and_offset(of[-1], int(of[:8]))
def ss2of(self, ss):
''' return the ID of the synset '''
return ("{:08d}-{}".format(ss.offset(), ss.pos()))
def _load_lang_data(self, lang):
''' load the wordnet data of the requested language from the file to the cache, _lang_data '''
if lang not in self.langs():
raise WordNetError("Language is not supported.")
        if lang in self._lang_data:
return
f = self._omw_reader.open('{0:}/wn-data-{0:}.tab'.format(lang))
self._lang_data[lang].append(defaultdict(list))
self._lang_data[lang].append(defaultdict(list))
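        # Each data line is tab-separated as '<offset-pos>\t<type>\t<lemma>',
        # e.g. '02084071-n\tfin:lemma\tkoira' (an illustrative Finnish entry);
        # comment lines start with '#'.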
for l in f.readlines():
l = l.replace('\n', '')
l = l.replace(' ', '_')
if l[0] != '#':
word = l.split('\t')
self._lang_data[lang][0][word[0]].append(word[2])
self._lang_data[lang][1][word[2]].append(word[0])
f.close()
def langs(self):
''' return a list of languages supported by Multilingual Wordnet '''
import os
        langs = ['eng']
fileids = self._omw_reader.fileids()
for fileid in fileids:
file_name, file_extension = os.path.splitext(fileid)
if file_extension == '.tab':
langs.append(file_name.split('-')[-1])
return langs
def _load_lemma_pos_offset_map(self):
for suffix in self._FILEMAP.values():
# parse each line of the file (ignoring comment lines)
for i, line in enumerate(self.open('index.%s' % suffix)):
if line.startswith(' '):
continue
_iter = iter(line.split())
_next_token = lambda: next(_iter)
try:
# get the lemma and part-of-speech
lemma = _next_token()
pos = _next_token()
# get the number of synsets for this lemma
n_synsets = int(_next_token())
assert n_synsets > 0
# get the pointer symbols for all synsets of this lemma
n_pointers = int(_next_token())
_ = [_next_token() for _ in xrange(n_pointers)]
# same as number of synsets
n_senses = int(_next_token())
assert n_synsets == n_senses
# get number of senses ranked according to frequency
_ = int(_next_token())
# get synset offsets
synset_offsets = [int(_next_token()) for _ in xrange(n_synsets)]
# raise more informative error with file name and line number
except (AssertionError, ValueError) as e:
tup = ('index.%s' % suffix), (i + 1), e
raise WordNetError('file %s, line %i: %s' % tup)
# map lemmas and parts of speech to synsets
self._lemma_pos_offset_map[lemma][pos] = synset_offsets
if pos == ADJ:
self._lemma_pos_offset_map[lemma][ADJ_SAT] = synset_offsets
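    # Schematic of an index.<pos> entry parsed above (hedged; see the
    # WNDB documentation for the authoritative grammar):
    #   lemma pos n_synsets n_pointers ptr_symbol... n_senses tagsense_cnt offset...
    # e.g. the noun entry for 'dog' ends in its synset offsets, the first
    # of which (02084071 in WordNet 3.0) is its most frequent sense.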
def _load_exception_map(self):
# load the exception file data into memory
for pos, suffix in self._FILEMAP.items():
self._exception_map[pos] = {}
for line in self.open('%s.exc' % suffix):
terms = line.split()
self._exception_map[pos][terms[0]] = terms[1:]
self._exception_map[ADJ_SAT] = self._exception_map[ADJ]
def _compute_max_depth(self, pos, simulate_root):
"""
Compute the max depth for the given part of speech. This is
used by the lch similarity metric.
"""
depth = 0
for ii in self.all_synsets(pos):
try:
depth = max(depth, ii.max_depth())
except RuntimeError:
print(ii)
if simulate_root:
depth += 1
self._max_depth[pos] = depth
def get_version(self):
fh = self._data_file(ADJ)
for line in fh:
match = re.search(r'WordNet (\d+\.\d+) Copyright', line)
if match is not None:
version = match.group(1)
fh.seek(0)
return version
#////////////////////////////////////////////////////////////
# Loading Lemmas
#////////////////////////////////////////////////////////////
def lemma(self, name, lang='eng'):
'''Return lemma object that matches the name'''
# cannot simply split on first '.', e.g.: '.45_caliber.a.01..45_caliber'
separator = SENSENUM_RE.search(name).start()
synset_name, lemma_name = name[:separator+3], name[separator+4:]
synset = self.synset(synset_name)
for lemma in synset.lemmas(lang):
if lemma._name == lemma_name:
return lemma
raise WordNetError('no lemma %r in %r' % (lemma_name, synset_name))
def lemma_from_key(self, key):
# Keys are case sensitive and always lower-case
key = key.lower()
lemma_name, lex_sense = key.split('%')
pos_number, lexname_index, lex_id, _, _ = lex_sense.split(':')
pos = self._pos_names[int(pos_number)]
# open the key -> synset file if necessary
if self._key_synset_file is None:
self._key_synset_file = self.open('index.sense')
# Find the synset for the lemma.
synset_line = _binary_search_file(self._key_synset_file, key)
if not synset_line:
raise WordNetError("No synset found for key %r" % key)
offset = int(synset_line.split()[1])
synset = self._synset_from_pos_and_offset(pos, offset)
# return the corresponding lemma
for lemma in synset._lemmas:
if lemma._key == key:
return lemma
raise WordNetError("No lemma found for for key %r" % key)
#////////////////////////////////////////////////////////////
# Loading Synsets
#////////////////////////////////////////////////////////////
def synset(self, name):
# split name into lemma, part of speech and synset number
lemma, pos, synset_index_str = name.lower().rsplit('.', 2)
synset_index = int(synset_index_str) - 1
# get the offset for this synset
try:
offset = self._lemma_pos_offset_map[lemma][pos][synset_index]
except KeyError:
message = 'no lemma %r with part of speech %r'
raise WordNetError(message % (lemma, pos))
except IndexError:
n_senses = len(self._lemma_pos_offset_map[lemma][pos])
message = "lemma %r with part of speech %r has only %i %s"
if n_senses == 1:
tup = lemma, pos, n_senses, "sense"
else:
tup = lemma, pos, n_senses, "senses"
raise WordNetError(message % tup)
# load synset information from the appropriate file
synset = self._synset_from_pos_and_offset(pos, offset)
# some basic sanity checks on loaded attributes
if pos == 's' and synset._pos == 'a':
message = ('adjective satellite requested but only plain '
'adjective found for lemma %r')
raise WordNetError(message % lemma)
assert synset._pos == pos or (pos == 'a' and synset._pos == 's')
# Return the synset object.
return synset
def _data_file(self, pos):
"""
Return an open file pointer for the data file for the given
part of speech.
"""
if pos == ADJ_SAT:
pos = ADJ
if self._data_file_map.get(pos) is None:
fileid = 'data.%s' % self._FILEMAP[pos]
self._data_file_map[pos] = self.open(fileid)
return self._data_file_map[pos]
def _synset_from_pos_and_offset(self, pos, offset):
# Check to see if the synset is in the cache
if offset in self._synset_offset_cache[pos]:
return self._synset_offset_cache[pos][offset]
data_file = self._data_file(pos)
data_file.seek(offset)
data_file_line = data_file.readline()
synset = self._synset_from_pos_and_line(pos, data_file_line)
assert synset._offset == offset
self._synset_offset_cache[pos][offset] = synset
return synset
def _synset_from_pos_and_line(self, pos, data_file_line):
# Construct a new (empty) synset.
synset = Synset(self)
# parse the entry for this synset
try:
# parse out the definitions and examples from the gloss
columns_str, gloss = data_file_line.split('|')
gloss = gloss.strip()
definitions = []
for gloss_part in gloss.split(';'):
gloss_part = gloss_part.strip()
if gloss_part.startswith('"'):
synset._examples.append(gloss_part.strip('"'))
else:
definitions.append(gloss_part)
synset._definition = '; '.join(definitions)
# split the other info into fields
_iter = iter(columns_str.split())
_next_token = lambda: next(_iter)
# get the offset
synset._offset = int(_next_token())
# determine the lexicographer file name
lexname_index = int(_next_token())
synset._lexname = self._lexnames[lexname_index]
# get the part of speech
synset._pos = _next_token()
# create Lemma objects for each lemma
n_lemmas = int(_next_token(), 16)
for _ in xrange(n_lemmas):
# get the lemma name
lemma_name = _next_token()
# get the lex_id (used for sense_keys)
lex_id = int(_next_token(), 16)
# If the lemma has a syntactic marker, extract it.
m = re.match(r'(.*?)(\(.*\))?$', lemma_name)
lemma_name, syn_mark = m.groups()
# create the lemma object
lemma = Lemma(self, synset, lemma_name, lexname_index,
lex_id, syn_mark)
synset._lemmas.append(lemma)
synset._lemma_names.append(lemma._name)
# collect the pointer tuples
n_pointers = int(_next_token())
for _ in xrange(n_pointers):
symbol = _next_token()
offset = int(_next_token())
pos = _next_token()
lemma_ids_str = _next_token()
if lemma_ids_str == '0000':
synset._pointers[symbol].add((pos, offset))
else:
source_index = int(lemma_ids_str[:2], 16) - 1
target_index = int(lemma_ids_str[2:], 16) - 1
source_lemma_name = synset._lemmas[source_index]._name
lemma_pointers = synset._lemma_pointers
tups = lemma_pointers[source_lemma_name, symbol]
tups.add((pos, offset, target_index))
# read the verb frames
try:
frame_count = int(_next_token())
except StopIteration:
pass
else:
for _ in xrange(frame_count):
# read the plus sign
plus = _next_token()
assert plus == '+'
# read the frame and lemma number
frame_number = int(_next_token())
frame_string_fmt = VERB_FRAME_STRINGS[frame_number]
lemma_number = int(_next_token(), 16)
# lemma number of 00 means all words in the synset
if lemma_number == 0:
synset._frame_ids.append(frame_number)
for lemma in synset._lemmas:
lemma._frame_ids.append(frame_number)
lemma._frame_strings.append(frame_string_fmt %
lemma._name)
# only a specific word in the synset
else:
lemma = synset._lemmas[lemma_number - 1]
lemma._frame_ids.append(frame_number)
lemma._frame_strings.append(frame_string_fmt %
lemma._name)
# raise a more informative error with line text
except ValueError as e:
raise WordNetError('line %r: %s' % (data_file_line, e))
# set sense keys for Lemma objects - note that this has to be
# done afterwards so that the relations are available
for lemma in synset._lemmas:
if synset._pos == ADJ_SAT:
head_lemma = synset.similar_tos()[0]._lemmas[0]
head_name = head_lemma._name
head_id = '%02d' % head_lemma._lex_id
else:
head_name = head_id = ''
tup = (lemma._name, WordNetCorpusReader._pos_numbers[synset._pos],
lemma._lexname_index, lemma._lex_id, head_name, head_id)
lemma._key = ('%s%%%d:%02d:%02d:%s:%s' % tup).lower()
# the canonical name is based on the first lemma
lemma_name = synset._lemmas[0]._name.lower()
offsets = self._lemma_pos_offset_map[lemma_name][synset._pos]
sense_index = offsets.index(synset._offset)
tup = lemma_name, synset._pos, sense_index + 1
synset._name = '%s.%s.%02i' % tup
return synset
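    # Schematic of a data.<pos> line parsed above (hedged; consult the
    # WNDB documentation for the exact grammar):
    #   offset lexname_idx ss_type n_lemmas(hex) lemma lex_id(hex) ...
    #   n_pointers ptr_symbol offset pos source_target ... [frames] | gloss
    # Everything after the '|' is split into definitions and quoted examples.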
#////////////////////////////////////////////////////////////
# Retrieve synsets and lemmas.
#////////////////////////////////////////////////////////////
def synsets(self, lemma, pos=None, lang='eng'):
"""Load all synsets with a given lemma and part of speech tag.
If no pos is specified, all synsets for all parts of speech
will be loaded.
If lang is specified, all the synsets associated with the lemma name
of that language will be returned.
"""
lemma = lemma.lower()
if lang == 'eng':
get_synset = self._synset_from_pos_and_offset
index = self._lemma_pos_offset_map
if pos is None:
pos = POS_LIST
return [get_synset(p, offset)
for p in pos
for form in self._morphy(lemma, p)
for offset in index[form].get(p, [])]
else:
self._load_lang_data(lang)
synset_list = []
for l in self._lang_data[lang][1][lemma]:
if pos is not None and l[-1] != pos:
continue
synset_list.append(self.of2ss(l))
return synset_list
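    # Illustrative calls (hedged): synsets('dog', pos='v') walks the
    # English branch above via _morphy and the offset index, while
    # synsets('cane', lang='ita') (assuming the OMW data is installed)
    # goes through the _lang_data tables instead.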
def lemmas(self, lemma, pos=None, lang='eng'):
"""Return all Lemma objects with a name matching the specified lemma
name and part of speech tag. Matches any part of speech tag if none is
specified."""
if lang == 'eng':
lemma = lemma.lower()
return [lemma_obj
for synset in self.synsets(lemma, pos)
for lemma_obj in synset.lemmas()
if lemma_obj.name().lower() == lemma]
else:
self._load_lang_data(lang)
lemmas = []
syn = self.synsets(lemma, lang=lang)
for s in syn:
if pos is not None and s.pos() != pos:
continue
a = Lemma(self, s, lemma, self._lexnames.index(s.lexname()), 0, None)
a._lang = lang
lemmas.append(a)
return lemmas
def all_lemma_names(self, pos=None, lang='eng'):
"""Return all lemma names for all synsets for the given
part of speech tag and language or languages. If pos is not specified, all synsets
for all parts of speech will be used."""
if lang == 'eng':
if pos is None:
return iter(self._lemma_pos_offset_map)
else:
return (lemma
for lemma in self._lemma_pos_offset_map
if pos in self._lemma_pos_offset_map[lemma])
else:
self._load_lang_data(lang)
lemma = []
for i in self._lang_data[lang][0]:
if pos is not None and i[-1] != pos:
continue
lemma.extend(self._lang_data[lang][0][i])
lemma = list(set(lemma))
return lemma
def all_synsets(self, pos=None):
"""Iterate over all synsets with a given part of speech tag.
If no pos is specified, all synsets for all parts of speech
will be loaded.
"""
if pos is None:
pos_tags = self._FILEMAP.keys()
else:
pos_tags = [pos]
cache = self._synset_offset_cache
from_pos_and_line = self._synset_from_pos_and_line
# generate all synsets for each part of speech
for pos_tag in pos_tags:
            # Open the file for reading. Note that we cannot re-use
            # the file pointers from self._data_file_map here, because
# we're defining an iterator, and those file pointers might
# be moved while we're not looking.
if pos_tag == ADJ_SAT:
pos_tag = ADJ
fileid = 'data.%s' % self._FILEMAP[pos_tag]
data_file = self.open(fileid)
try:
# generate synsets for each line in the POS file
offset = data_file.tell()
line = data_file.readline()
while line:
if not line[0].isspace():
if offset in cache[pos_tag]:
# See if the synset is cached
synset = cache[pos_tag][offset]
else:
# Otherwise, parse the line
synset = from_pos_and_line(pos_tag, line)
cache[pos_tag][offset] = synset
# adjective satellites are in the same file as
# adjectives so only yield the synset if it's actually
# a satellite
if synset._pos == ADJ_SAT:
yield synset
# for all other POS tags, yield all synsets (this means
# that adjectives also include adjective satellites)
else:
yield synset
offset = data_file.tell()
line = data_file.readline()
# close the extra file handle we opened
except:
data_file.close()
raise
else:
data_file.close()
def words(self, lang='eng'):
"""return lemmas of the given language as list of words"""
return self.all_lemma_names(lang=lang)
def license(self, lang='eng'):
"""Return the contents of LICENSE (for omw)
use lang=lang to get the license for an individual language"""
if lang == 'eng':
return self.open("LICENSE").read()
elif lang in self.langs():
return self._omw_reader.open("{}/LICENSE".format(lang)).read()
elif lang == 'omw':
### under the not unreasonable assumption you don't mean Omwunra-Toqura
return self._omw_reader.open("LICENSE").read()
else:
raise WordNetError("Language is not supported.")
def readme(self, lang='omw'):
"""Return the contents of README (for omw)
use lang=lang to get the readme for an individual language"""
if lang == 'eng':
return self.open("README").read()
elif lang in self.langs():
return self._omw_reader.open("{}/README".format(lang)).read()
elif lang == 'omw':
### under the not unreasonable assumption you don't mean Omwunra-Toqura
return self._omw_reader.open("README").read()
else:
raise WordNetError("Language is not supported.")
def citation(self, lang='omw'):
"""Return the contents of citation.bib file (for omw)
use lang=lang to get the citation for an individual language"""
if lang == 'eng':
return self.open("citation.bib").read()
elif lang in self.langs():
return self._omw_reader.open("{}/citation.bib".format(lang)).read()
elif lang == 'omw':
### under the not unreasonable assumption you don't mean Omwunra-Toqura
return self._omw_reader.open("citation.bib").read()
else:
raise WordNetError("Language is not supported.")
#////////////////////////////////////////////////////////////
# Misc
#////////////////////////////////////////////////////////////
def lemma_count(self, lemma):
"""Return the frequency count for this Lemma"""
        # Currently, counts are only available for English
if lemma._lang != 'eng':
return 0
# open the count file if we haven't already
if self._key_count_file is None:
self._key_count_file = self.open('cntlist.rev')
# find the key in the counts file and return the count
line = _binary_search_file(self._key_count_file, lemma._key)
if line:
return int(line.rsplit(' ', 1)[-1])
else:
return 0
def path_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
return synset1.path_similarity(synset2, verbose, simulate_root)
path_similarity.__doc__ = Synset.path_similarity.__doc__
def lch_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
return synset1.lch_similarity(synset2, verbose, simulate_root)
lch_similarity.__doc__ = Synset.lch_similarity.__doc__
def wup_similarity(self, synset1, synset2, verbose=False, simulate_root=True):
return synset1.wup_similarity(synset2, verbose, simulate_root)
wup_similarity.__doc__ = Synset.wup_similarity.__doc__
def res_similarity(self, synset1, synset2, ic, verbose=False):
return synset1.res_similarity(synset2, ic, verbose)
res_similarity.__doc__ = Synset.res_similarity.__doc__
def jcn_similarity(self, synset1, synset2, ic, verbose=False):
return synset1.jcn_similarity(synset2, ic, verbose)
jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
def lin_similarity(self, synset1, synset2, ic, verbose=False):
return synset1.lin_similarity(synset2, ic, verbose)
lin_similarity.__doc__ = Synset.lin_similarity.__doc__
#////////////////////////////////////////////////////////////
# Morphy
#////////////////////////////////////////////////////////////
# Morphy, adapted from Oliver Steele's pywordnet
def morphy(self, form, pos=None):
"""
Find a possible base form for the given form, with the given
part of speech, by checking WordNet's list of exceptional
forms, and by recursively stripping affixes for this part of
speech until a form in WordNet is found.
>>> from nltk.corpus import wordnet as wn
>>> print(wn.morphy('dogs'))
dog
>>> print(wn.morphy('churches'))
church
>>> print(wn.morphy('aardwolves'))
aardwolf
>>> print(wn.morphy('abaci'))
abacus
>>> wn.morphy('hardrock', wn.ADV)
>>> print(wn.morphy('book', wn.NOUN))
book
>>> wn.morphy('book', wn.ADJ)
"""
if pos is None:
morphy = self._morphy
analyses = chain(a for p in POS_LIST for a in morphy(form, p))
else:
analyses = self._morphy(form, pos)
# get the first one we find
first = list(islice(analyses, 1))
if len(first) == 1:
return first[0]
else:
return None
MORPHOLOGICAL_SUBSTITUTIONS = {
NOUN: [('s', ''), ('ses', 's'), ('ves', 'f'), ('xes', 'x'),
('zes', 'z'), ('ches', 'ch'), ('shes', 'sh'),
('men', 'man'), ('ies', 'y')],
VERB: [('s', ''), ('ies', 'y'), ('es', 'e'), ('es', ''),
('ed', 'e'), ('ed', ''), ('ing', 'e'), ('ing', '')],
ADJ: [('er', ''), ('est', ''), ('er', 'e'), ('est', 'e')],
ADV: []}
MORPHOLOGICAL_SUBSTITUTIONS[ADJ_SAT] = MORPHOLOGICAL_SUBSTITUTIONS[ADJ]
def _morphy(self, form, pos):
# from jordanbg:
# Given an original string x
# 1. Apply rules once to the input to get y1, y2, y3, etc.
# 2. Return all that are in the database
# 3. If there are no matches, keep applying rules until you either
# find a match or you can't go any further
exceptions = self._exception_map[pos]
substitutions = self.MORPHOLOGICAL_SUBSTITUTIONS[pos]
def apply_rules(forms):
return [form[:-len(old)] + new
for form in forms
for old, new in substitutions
if form.endswith(old)]
def filter_forms(forms):
result = []
seen = set()
for form in forms:
if form in self._lemma_pos_offset_map:
if pos in self._lemma_pos_offset_map[form]:
if form not in seen:
result.append(form)
seen.add(form)
return result
# 0. Check the exception lists
if form in exceptions:
return filter_forms([form] + exceptions[form])
# 1. Apply rules once to the input to get y1, y2, y3, etc.
forms = apply_rules([form])
# 2. Return all that are in the database (and check the original too)
results = filter_forms([form] + forms)
if results:
return results
# 3. If there are no matches, keep applying rules until we find a match
while forms:
forms = apply_rules(forms)
results = filter_forms(forms)
if results:
return results
# Return an empty list if we can't find anything
return []
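    # Worked example (illustrative): _morphy('churches', NOUN) finds no
    # entry in noun.exc, so apply_rules produces 'churche' (via 's' -> '')
    # and 'church' (via 'ches' -> 'ch'); filter_forms keeps 'church'
    # because it appears in the noun index, and ['church'] is returned.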
#////////////////////////////////////////////////////////////
# Create information content from corpus
#////////////////////////////////////////////////////////////
def ic(self, corpus, weight_senses_equally = False, smoothing = 1.0):
"""
Creates an information content lookup dictionary from a corpus.
:type corpus: CorpusReader
:param corpus: The corpus from which we create an information
content dictionary.
:type weight_senses_equally: bool
:param weight_senses_equally: If this is True, gives all
possible senses equal weight rather than dividing by the
          number of possible senses. (If a word has 3 senses, each
          sense gets 0.3333 per appearance when this is False, and 1.0 when
          it is True.)
:param smoothing: How much do we smooth synset counts (default is 1.0)
:type smoothing: float
:return: An information content dictionary
"""
counts = FreqDist()
for ww in corpus.words():
counts[ww] += 1
ic = {}
for pp in POS_LIST:
ic[pp] = defaultdict(float)
# Initialize the counts with the smoothing value
if smoothing > 0.0:
for ss in self.all_synsets():
pos = ss._pos
if pos == ADJ_SAT:
pos = ADJ
ic[pos][ss._offset] = smoothing
for ww in counts:
possible_synsets = self.synsets(ww)
if len(possible_synsets) == 0:
continue
# Distribute weight among possible synsets
weight = float(counts[ww])
if not weight_senses_equally:
weight /= float(len(possible_synsets))
for ss in possible_synsets:
pos = ss._pos
if pos == ADJ_SAT:
pos = ADJ
for level in ss._iter_hypernym_lists():
for hh in level:
ic[pos][hh._offset] += weight
# Add the weight to the root
ic[pos][0] += weight
return ic
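    # Minimal usage sketch (assumes the Brown corpus is available):
    #   from nltk.corpus import brown
    #   brown_ic = wn.ic(brown, False, 1.0)
    # The resulting dictionary can be passed to res_similarity,
    # jcn_similarity or lin_similarity.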
######################################################################
## WordNet Information Content Corpus Reader
######################################################################
class WordNetICCorpusReader(CorpusReader):
"""
A corpus reader for the WordNet information content corpus.
"""
def __init__(self, root, fileids):
CorpusReader.__init__(self, root, fileids, encoding='utf8')
# this load function would be more efficient if the data was pickled
# Note that we can't use NLTK's frequency distributions because
# synsets are overlapping (each instance of a synset also counts
# as an instance of its hypernyms)
def ic(self, icfile):
"""
Load an information content file from the wordnet_ic corpus
and return a dictionary. This dictionary has just two keys,
NOUN and VERB, whose values are dictionaries that map from
synsets to information content values.
:type icfile: str
:param icfile: The name of the wordnet_ic file (e.g. "ic-brown.dat")
:return: An information content dictionary
"""
ic = {}
ic[NOUN] = defaultdict(float)
ic[VERB] = defaultdict(float)
for num, line in enumerate(self.open(icfile)):
if num == 0: # skip the header
continue
fields = line.split()
offset = int(fields[0][:-1])
value = float(fields[1])
pos = _get_pos(fields[0])
if len(fields) == 3 and fields[2] == "ROOT":
# Store root count.
ic[pos][0] += value
if value != 0:
ic[pos][offset] = value
return ic
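    # For reference (hedged), a wordnet_ic data line looks like
    #   1740n 1915712 ROOT
    # i.e. an offset with a pos suffix, an information-content value, and
    # an optional ROOT marker whose value is accumulated into ic[pos][0].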
######################################################################
# Similarity metrics
######################################################################
# TODO: Add in the option to manually add a new root node; this will be
# useful for verb similarity as there exist multiple verb taxonomies.
# More information about the metrics is available at
# http://marimba.d.umn.edu/similarity/measures.html
def path_similarity(synset1, synset2, verbose=False, simulate_root=True):
return synset1.path_similarity(synset2, verbose, simulate_root)
path_similarity.__doc__ = Synset.path_similarity.__doc__
def lch_similarity(synset1, synset2, verbose=False, simulate_root=True):
return synset1.lch_similarity(synset2, verbose, simulate_root)
lch_similarity.__doc__ = Synset.lch_similarity.__doc__
def wup_similarity(synset1, synset2, verbose=False, simulate_root=True):
return synset1.wup_similarity(synset2, verbose, simulate_root)
wup_similarity.__doc__ = Synset.wup_similarity.__doc__
def res_similarity(synset1, synset2, ic, verbose=False):
    return synset1.res_similarity(synset2, ic, verbose)
res_similarity.__doc__ = Synset.res_similarity.__doc__
def jcn_similarity(synset1, synset2, ic, verbose=False):
    return synset1.jcn_similarity(synset2, ic, verbose)
jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__
def lin_similarity(synset1, synset2, ic, verbose=False):
    return synset1.lin_similarity(synset2, ic, verbose)
lin_similarity.__doc__ = Synset.lin_similarity.__doc__
def _lcs_ic(synset1, synset2, ic, verbose=False):
"""
Get the information content of the least common subsumer that has
the highest information content value. If two nodes have no
explicit common subsumer, assume that they share an artificial
root node that is the hypernym of all explicit roots.
:type synset1: Synset
:param synset1: First input synset.
:type synset2: Synset
:param synset2: Second input synset. Must be the same part of
speech as the first synset.
:type ic: dict
:param ic: an information content object (as returned by ``load_ic()``).
:return: The information content of the two synsets and their most
informative subsumer
"""
if synset1._pos != synset2._pos:
raise WordNetError('Computing the least common subsumer requires ' + \
'%s and %s to have the same part of speech.' % \
(synset1, synset2))
ic1 = information_content(synset1, ic)
ic2 = information_content(synset2, ic)
subsumers = synset1.common_hypernyms(synset2)
if len(subsumers) == 0:
subsumer_ic = 0
else:
subsumer_ic = max(information_content(s, ic) for s in subsumers)
if verbose:
print("> LCS Subsumer by content:", subsumer_ic)
return ic1, ic2, subsumer_ic
# Utility functions
def information_content(synset, ic):
try:
icpos = ic[synset._pos]
except KeyError:
msg = 'Information content file has no entries for part-of-speech: %s'
raise WordNetError(msg % synset._pos)
counts = icpos[synset._offset]
if counts == 0:
return _INF
else:
return -math.log(counts / icpos[0])
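# Hedged note: the value computed above is the standard information
# content IC(s) = -log(freq(s) / freq(root)), where icpos[0] holds the
# (smoothed) frequency mass assigned to the root for that part of speech.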
# get the part of speech (NOUN or VERB) from the information content record
# (each identifier has a 'n' or 'v' suffix)
def _get_pos(field):
if field[-1] == 'n':
return NOUN
elif field[-1] == 'v':
return VERB
else:
msg = "Unidentified part of speech in WordNet Information Content file for field %s" % field
raise ValueError(msg)
# unload corpus after tests
def teardown_module(module=None):
from nltk.corpus import wordnet
wordnet._unload()
######################################################################
# Demo
######################################################################
def demo():
import nltk
print('loading wordnet')
wn = WordNetCorpusReader(nltk.data.find('corpora/wordnet'), None)
print('done loading')
S = wn.synset
L = wn.lemma
print('getting a synset for go')
move_synset = S('go.v.21')
print(move_synset.name(), move_synset.pos(), move_synset.lexname())
print(move_synset.lemma_names())
print(move_synset.definition())
print(move_synset.examples())
zap_n = ['zap.n.01']
zap_v = ['zap.v.01', 'zap.v.02', 'nuke.v.01', 'microwave.v.01']
def _get_synsets(synset_strings):
return [S(synset) for synset in synset_strings]
zap_n_synsets = _get_synsets(zap_n)
zap_v_synsets = _get_synsets(zap_v)
print(zap_n_synsets)
print(zap_v_synsets)
print("Navigations:")
print(S('travel.v.01').hypernyms())
print(S('travel.v.02').hypernyms())
print(S('travel.v.03').hypernyms())
print(L('zap.v.03.nuke').derivationally_related_forms())
print(L('zap.v.03.atomize').derivationally_related_forms())
print(L('zap.v.03.atomise').derivationally_related_forms())
print(L('zap.v.03.zap').derivationally_related_forms())
print(S('dog.n.01').member_holonyms())
print(S('dog.n.01').part_meronyms())
print(S('breakfast.n.1').hypernyms())
print(S('meal.n.1').hyponyms())
print(S('Austen.n.1').instance_hypernyms())
print(S('composer.n.1').instance_hyponyms())
print(S('faculty.n.2').member_meronyms())
print(S('copilot.n.1').member_holonyms())
print(S('table.n.2').part_meronyms())
print(S('course.n.7').part_holonyms())
print(S('water.n.1').substance_meronyms())
print(S('gin.n.1').substance_holonyms())
print(L('leader.n.1.leader').antonyms())
print(L('increase.v.1.increase').antonyms())
print(S('snore.v.1').entailments())
print(S('heavy.a.1').similar_tos())
print(S('light.a.1').attributes())
print(S('heavy.a.1').attributes())
print(L('English.a.1.English').pertainyms())
print(S('person.n.01').root_hypernyms())
print(S('sail.v.01').root_hypernyms())
print(S('fall.v.12').root_hypernyms())
print(S('person.n.01').lowest_common_hypernyms(S('dog.n.01')))
print(S('woman.n.01').lowest_common_hypernyms(S('girlfriend.n.02')))
print(S('dog.n.01').path_similarity(S('cat.n.01')))
print(S('dog.n.01').lch_similarity(S('cat.n.01')))
print(S('dog.n.01').wup_similarity(S('cat.n.01')))
wnic = WordNetICCorpusReader(nltk.data.find('corpora/wordnet_ic'),
'.*\.dat')
ic = wnic.ic('ic-brown.dat')
print(S('dog.n.01').jcn_similarity(S('cat.n.01'), ic))
ic = wnic.ic('ic-semcor.dat')
print(S('dog.n.01').lin_similarity(S('cat.n.01'), ic))
print(S('code.n.03').topic_domains())
print(S('pukka.a.01').region_domains())
print(S('freaky.a.01').usage_domains())
if __name__ == '__main__':
demo()
|
gpl-3.0
| -5,074,483,420,740,756,000
| 36.658644
| 137
| 0.549353
| false
| 3.932304
| false
| false
| false
|
jecr/tesis-caja
|
recovering/recover_cyclops.py
|
1
|
1648
|
# -*- coding: UTF-8 -*-
# Search for tweets by term
import tweepy
import time
import sys
# import os
consumer_key = 'e2C0wlpcDF2HFRZ1isnWXvdTm'
consumer_secret = 'muqOqWH1KByuC9ARZy006P8wclAryQcUgIsa1kcEzgXuUPw1aH'
access_token = '108874877-nLkeHo0WRx6Nsz9uctXFVtt9F2oam2Y8E5UfEZjt'
access_token_secret = '7puoG65PJW1ppYgJoMQAq58p4tFbpWTnPhiMOeMnzeobI'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
archivo1 = sys.argv[1]
lista = open(archivo1)
outputFile = open('descriptiones_recuperadas_02.csv', 'w')
for usuario in lista:
try:
        # Check the remaining query quota
        data = api.rate_limit_status()
        remaining = data['resources']['users']['/users/show/:id']['remaining']
        print str(remaining) + ' queries remaining for Cyclops'
        if remaining < 2:
            print 'Cyclops sleeping zZzZzZ'
            time.sleep(60*15)
        # End of quota check
user = api.get_user(usuario)
descripcion = user.description.encode('utf-8')
descripcion = descripcion.replace('\n', '')
descripcion = descripcion.replace('\r', '')
usuario = usuario.replace('\n', '').replace('\r', '')
outputFile.write(usuario+',"'+descripcion+'"\n')
print usuario
except Exception, e:
if e.message[0]['code'] == 88:
            print 'Cyclops sleeping zZzZzZ'
time.sleep(60*15)
else:
usuario = usuario.replace('\n', '').replace('\r', '')
outputFile.write(usuario+',"no_description"'+'\n')
print usuario
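# Illustrative invocation (the file name is hypothetical):
#   python recover_cyclops.py user_ids.txt
# reads one user per line, appends 'user,"description"' rows to the output
# CSV, and sleeps 15 minutes whenever the rate limit is exhausted.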
|
apache-2.0
| 3,659,899,803,650,555,000
| 33.270833
| 78
| 0.648024
| false
| 2.741667
| false
| false
| false
|
rgodinez/PencilCode-Work
|
EtSheet.py
|
1
|
3672
|
'''
This file is part of the EdTech library project at Full Sail University.
Foobar is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Foobar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Foobar. If not, see <http://www.gnu.org/licenses/>.
Copyright (C) 2014, 2015 Full Sail University.
'''
import xlrd
import os
import csv, codecs
def deleteColumn(rowData, columnNo):
for rowIndex in range (0, len(rowData)):
newRow = []
for entryIndex in range(0, len(rowData[rowIndex])):
if entryIndex != columnNo:
newRow.append(rowData[rowIndex][entryIndex])
rowData[rowIndex] = newRow
def deleteColumns(rowData, columns):
for column in reversed(columns):
deleteColumn(rowData, column)
def getColumn(rowData, number):
columnData = list()
for row in rowData:
columnData.append(row[number])
return columnData
def getColumnsNum(rowData):
columns = 0
    for row in rowData:
if len(row) > columns:
columns = len(row)
return columns
def getExcelSheetAsCsv(workbook, sheetName = None):
if sheetName != None:
sheet = workbook.sheet_by_name(sheetName)
else:
sheet = workbook.sheet_by_index(0)
# Get the row data
rowData = list()
for row in range(sheet.nrows):
values = list()
for col in range(sheet.ncols):
values.append(sheet.cell(row, col).value)
rowData.append(values)
return rowData
def loadCsv(filename, dialect = None):
# Determine if the file exists. If not, raise an exception.
if not os.path.isfile(filename):
raise Exception("Error: " + filename + " not found.")
    # Open the csv file; a dialect may be supplied by the caller
csvFile = open(filename, 'rU')
# Read file into list of lists
if dialect != None:
reader = csv.reader(csvFile, dialect)
else:
reader = csv.reader(csvFile)
rowData = list()
for row in reader:
rowData.append(row)
csvFile.close()
return rowData
def loadExcel(filename):
# Determine if the file exists. If not, raise an exception.
if not os.path.isfile(filename):
raise Exception("Error: " + filename + " not found.")
# Load the workbook.
    # Let open_workbook errors propagate; swallowing them here would
    # leave `workbook` undefined at the return statement.
    workbook = xlrd.open_workbook(filename)
    return workbook
def loadExcelSheetAsCsv(filename, sheetName = None):
return getExcelSheetAsCsv(loadExcel(filename), sheetName)
def saveCsv(filename, rowData, insertKey = False):
# Open file for writing
csvFile = codecs.open(filename, 'w')
writer = csv.writer(csvFile, quotechar='"', delimiter=',')
# Write the data
if insertKey:
for key, row in rowData.iteritems():
print "Key: " + key + " Value: " + row
writer.writerow([ key ] + row)
else:
# i = 0
for row in rowData:
# print "[" + str(i) + "]: " + row
writer.writerow(row)
# Close the file
csvFile.close()
def write_multiple(sheet, rowIndex, colIndex, dataList, style):
for cellData in dataList:
sheet.write(rowIndex, colIndex, cellData, style)
colIndex = colIndex + 1
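# Minimal usage sketch (file and sheet names are hypothetical):
#   rows = loadExcelSheetAsCsv('grades.xlsx', 'Sheet1')
#   deleteColumns(rows, [0, 2])  # drop the first and third columns
#   saveCsv('grades.csv', rows)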
|
bsd-3-clause
| -7,684,717,401,141,878,000
| 28.142857
| 72
| 0.644336
| false
| 3.841004
| false
| false
| false
|
nicogid/apiTwitchStats
|
TwitchStats/config/urls.py
|
1
|
1047
|
"""api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from adminplus.sites import AdminSitePlus
from rest_framework_swagger.views import get_swagger_view
admin.site = AdminSitePlus()
admin.autodiscover()
schema_view = get_swagger_view(title='Fuck API')
urlpatterns = [
url(r'^apidoc/', schema_view),
url(r'^admin/', admin.site.urls),
url(r'^api/', include('api.urls')),
]
|
mit
| 7,464,930,889,878,722,000
| 31.71875
| 79
| 0.706781
| false
| 3.432787
| false
| false
| false
|
jeffmarcom/checkbox
|
plainbox/plainbox/impl/commands/run.py
|
1
|
13956
|
# This file is part of Checkbox.
#
# Copyright 2012-2013 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.commands.run` -- run sub-command
====================================================
.. warning::
THIS MODULE DOES NOT HAVE STABLE PUBLIC API
"""
from argparse import FileType
from logging import getLogger
from os.path import join
from shutil import copyfileobj
import io
import sys
from requests.exceptions import ConnectionError, InvalidSchema, HTTPError
from plainbox.impl.commands import PlainBoxCommand
from plainbox.impl.commands.checkbox import CheckBoxCommandMixIn
from plainbox.impl.depmgr import DependencyDuplicateError
from plainbox.impl.exporter import ByteStringStreamTranslator
from plainbox.impl.exporter import get_all_exporters
from plainbox.impl.transport import get_all_transports
from plainbox.impl.result import JobResult
from plainbox.impl.runner import authenticate_warmup
from plainbox.impl.runner import JobRunner
from plainbox.impl.runner import slugify
from plainbox.impl.session import SessionState
logger = getLogger("plainbox.commands.run")
class RunCommand(PlainBoxCommand, CheckBoxCommandMixIn):
def invoked(self, ns):
if ns.output_format == '?':
self._print_output_format_list(ns)
return 0
elif ns.output_options == '?':
self._print_output_option_list(ns)
return 0
elif ns.transport == '?':
self._print_transport_list(ns)
return 0
else:
exporter = self._prepare_exporter(ns)
transport = self._prepare_transport(ns)
job_list = self.get_job_list(ns)
return self._run_jobs(ns, job_list, exporter, transport)
def register_parser(self, subparsers):
parser = subparsers.add_parser("run", help="run a test job")
parser.set_defaults(command=self)
group = parser.add_argument_group(title="user interface options")
group.add_argument(
'--not-interactive', action='store_true',
help="Skip tests that require interactivity")
group.add_argument(
'-n', '--dry-run', action='store_true',
help="Don't actually run any jobs")
group = parser.add_argument_group("output options")
assert 'text' in get_all_exporters()
group.add_argument(
'-f', '--output-format', default='text',
metavar='FORMAT', choices=['?'] + list(
get_all_exporters().keys()),
help=('Save test results in the specified FORMAT'
' (pass ? for a list of choices)'))
group.add_argument(
'-p', '--output-options', default='',
metavar='OPTIONS',
help=('Comma-separated list of options for the export mechanism'
' (pass ? for a list of choices)'))
group.add_argument(
'-o', '--output-file', default='-',
metavar='FILE', type=FileType("wb"),
help=('Save test results to the specified FILE'
' (or to stdout if FILE is -)'))
group.add_argument(
'-t', '--transport',
metavar='TRANSPORT', choices=['?'] + list(
get_all_transports().keys()),
help=('use TRANSPORT to send results somewhere'
' (pass ? for a list of choices)'))
group.add_argument(
'--transport-where',
metavar='WHERE',
help=('Where to send data using the selected transport.'
' This is passed as-is and is transport-dependent.'))
group.add_argument(
'--transport-options',
metavar='OPTIONS',
help=('Comma-separated list of key-value options (k=v) to '
' be passed to the transport.'))
# Call enhance_parser from CheckBoxCommandMixIn
self.enhance_parser(parser)
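    # Hedged CLI sketch of the options registered above:
    #   plainbox run -f text -o results.txt --not-interactive
    # Passing '?' to -f, -p or -t prints the corresponding list of
    # choices and exits, as handled in invoked().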
def _print_output_format_list(self, ns):
print("Available output formats: {}".format(
', '.join(get_all_exporters())))
def _print_output_option_list(self, ns):
print("Each format may support a different set of options")
for name, exporter_cls in get_all_exporters().items():
print("{}: {}".format(
name, ", ".join(exporter_cls.supported_option_list)))
def _print_transport_list(self, ns):
print("Available transports: {}".format(
', '.join(get_all_transports())))
def _prepare_exporter(self, ns):
exporter_cls = get_all_exporters()[ns.output_format]
if ns.output_options:
option_list = ns.output_options.split(',')
else:
option_list = None
try:
exporter = exporter_cls(option_list)
except ValueError as exc:
raise SystemExit(str(exc))
return exporter
def _prepare_transport(self, ns):
if ns.transport not in get_all_transports():
return None
transport_cls = get_all_transports()[ns.transport]
try:
return transport_cls(ns.transport_where, ns.transport_options)
except ValueError as exc:
raise SystemExit(str(exc))
def ask_for_resume(self, prompt=None, allowed=None):
# FIXME: Add support/callbacks for a GUI
if prompt is None:
prompt = "Do you want to resume the previous session [Y/n]? "
if allowed is None:
allowed = ('', 'y', 'Y', 'n', 'N')
answer = None
while answer not in allowed:
answer = input(prompt)
return False if answer in ('n', 'N') else True
def _run_jobs(self, ns, job_list, exporter, transport=None):
# Ask the password before anything else in order to run jobs requiring
# privileges
print("[ Authentication ]".center(80, '='))
return_code = authenticate_warmup()
if return_code:
raise SystemExit(return_code)
        # Compute the run list; this can notify us about problems in
        # the selected jobs. Currently we just display each problem.
matching_job_list = self._get_matching_job_list(ns, job_list)
print("[ Analyzing Jobs ]".center(80, '='))
# Create a session that handles most of the stuff needed to run jobs
try:
session = SessionState(job_list)
except DependencyDuplicateError as exc:
# Handle possible DependencyDuplicateError that can happen if
# someone is using plainbox for job development.
print("The job database you are currently using is broken")
print("At least two jobs contend for the name {0}".format(
exc.job.name))
print("First job defined in: {0}".format(exc.job.origin))
print("Second job defined in: {0}".format(
exc.duplicate_job.origin))
raise SystemExit(exc)
with session.open():
if session.previous_session_file():
if self.ask_for_resume():
session.resume()
else:
session.clean()
self._update_desired_job_list(session, matching_job_list)
if (sys.stdin.isatty() and sys.stdout.isatty() and not
ns.not_interactive):
outcome_callback = self.ask_for_outcome
else:
outcome_callback = None
runner = JobRunner(
session.session_dir,
session.jobs_io_log_dir,
outcome_callback=outcome_callback,
dry_run=ns.dry_run
)
self._run_jobs_with_session(ns, session, runner)
# Get a stream with exported session data.
exported_stream = io.BytesIO()
data_subset = exporter.get_session_data_subset(session)
exporter.dump(data_subset, exported_stream)
exported_stream.seek(0) # Need to rewind the file, puagh
# Write the stream to file if requested
self._save_results(ns.output_file, exported_stream)
# Invoke the transport?
if transport:
exported_stream.seek(0)
try:
transport.send(exported_stream.read())
except InvalidSchema as exc:
print("Invalid destination URL: {0}".format(exc))
except ConnectionError as exc:
print(("Unable to connect "
"to destination URL: {0}").format(exc))
except HTTPError as exc:
print(("Server returned an error when "
"receiving or processing: {0}").format(exc))
# FIXME: sensible return value
return 0
def _save_results(self, output_file, input_stream):
if output_file is sys.stdout:
print("[ Results ]".center(80, '='))
# This requires a bit more finesse, as exporters output bytes
# and stdout needs a string.
translating_stream = ByteStringStreamTranslator(
output_file, "utf-8")
copyfileobj(input_stream, translating_stream)
else:
print("Saving results to {}".format(output_file.name))
copyfileobj(input_stream, output_file)
if output_file is not sys.stdout:
output_file.close()
def ask_for_outcome(self, prompt=None, allowed=None):
if prompt is None:
prompt = "what is the outcome? "
if allowed is None:
allowed = (JobResult.OUTCOME_PASS,
JobResult.OUTCOME_FAIL,
JobResult.OUTCOME_SKIP)
answer = None
while answer not in allowed:
print("Allowed answers are: {}".format(", ".join(allowed)))
answer = input(prompt)
return answer
def _update_desired_job_list(self, session, desired_job_list):
problem_list = session.update_desired_job_list(desired_job_list)
if problem_list:
print("[ Warning ]".center(80, '*'))
print("There were some problems with the selected jobs")
for problem in problem_list:
print(" * {}".format(problem))
print("Problematic jobs will not be considered")
def _run_jobs_with_session(self, ns, session, runner):
# TODO: run all resource jobs concurrently with multiprocessing
# TODO: make local job discovery nicer, it would be best if
        # desired_jobs could be managed entirely internally by SessionState
        # (in such a case the list of jobs to run would be changed during
        # iteration but would be otherwise okay).
print("[ Running All Jobs ]".center(80, '='))
again = True
while again:
again = False
for job in session.run_list:
# Skip jobs that already have result, this is only needed when
# we run over the list of jobs again, after discovering new
# jobs via the local job output
if session.job_state_map[job.name].result.outcome is not None:
continue
self._run_single_job_with_session(ns, session, runner, job)
session.persistent_save()
if job.plugin == "local":
# After each local job runs rebuild the list of matching
# jobs and run everything again
new_matching_job_list = self._get_matching_job_list(
ns, session.job_list)
self._update_desired_job_list(
session, new_matching_job_list)
again = True
break
def _run_single_job_with_session(self, ns, session, runner, job):
print("[ {} ]".format(job.name).center(80, '-'))
if job.description is not None:
print(job.description)
print("^" * len(job.description.splitlines()[-1]))
print()
job_state = session.job_state_map[job.name]
logger.debug("Job name: %s", job.name)
logger.debug("Plugin: %s", job.plugin)
logger.debug("Direct dependencies: %s", job.get_direct_dependencies())
logger.debug("Resource dependencies: %s",
job.get_resource_dependencies())
logger.debug("Resource program: %r", job.requires)
logger.debug("Command: %r", job.command)
logger.debug("Can start: %s", job_state.can_start())
logger.debug("Readiness: %s", job_state.get_readiness_description())
if job_state.can_start():
print("Running... (output in {}.*)".format(
join(session.jobs_io_log_dir, slugify(job.name))))
job_result = runner.run_job(job)
print("Outcome: {}".format(job_result.outcome))
print("Comments: {}".format(job_result.comments))
else:
job_result = JobResult({
'job': job,
'outcome': JobResult.OUTCOME_NOT_SUPPORTED,
'comments': job_state.get_readiness_description()
})
if job_result is not None:
session.update_job_result(job, job_result)
|
gpl-3.0
| -2,891,184,283,134,012,400
| 41.809816
| 79
| 0.58491
| false
| 4.350374
| true
| false
| false
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/effective_network_security_group_py3.py
|
1
|
1806
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroup(Model):
"""Effective network security group.
:param network_security_group: The ID of network security group that is
applied.
:type network_security_group:
~azure.mgmt.network.v2017_03_01.models.SubResource
:param association:
:type association:
~azure.mgmt.network.v2017_03_01.models.EffectiveNetworkSecurityGroupAssociation
:param effective_security_rules: A collection of effective security rules.
:type effective_security_rules:
list[~azure.mgmt.network.v2017_03_01.models.EffectiveNetworkSecurityRule]
"""
_attribute_map = {
'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'},
'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'},
'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
}
def __init__(self, *, network_security_group=None, association=None, effective_security_rules=None, **kwargs) -> None:
super(EffectiveNetworkSecurityGroup, self).__init__(**kwargs)
self.network_security_group = network_security_group
self.association = association
self.effective_security_rules = effective_security_rules
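    # Hedged construction sketch (the resource id is hypothetical):
    #   group = EffectiveNetworkSecurityGroup(
    #       network_security_group=SubResource(id='/subscriptions/.../networkSecurityGroups/nsg1'))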
|
mit
| 5,913,976,556,918,401,000
| 44.15
| 122
| 0.665559
| false
| 4.572152
| false
| false
| false
|
pbryzek/Freedom
|
api_client.py
|
1
|
1099
|
import requests
import consts.paths as paths
import common.globals as globals
class APIClient(object):
"""Creates an API client object
"""
def __init__(self, path, params, method="GET"):
#Zillow specific key
self.zws_id = "X1-ZWz19tezrsrabv_5qhl2"
#Specific endpoint
self.path = path
#Base URL
self.base_url = paths.BASE_URL
#The params to send along with the request.
self.params = params
#GET or POST
self.method = method
def request(self):
"""Makes a request to the API with the given parameters
"""
# add the authentication parameters (sent with every request)
self.params["zws-id"] = self.zws_id
full_url = self.base_url + self.path
globals.handle_err_msg("Fetching " + full_url)
#globals.handle_err_msg(self.params)
# send a request to the api server
result = requests.request(
method = self.method,
url = full_url,
params = self.params
)
return result
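    # Hedged usage sketch (the endpoint and params are hypothetical):
    #   client = APIClient('/webservice/GetSearchResults.htm',
    #                      {'address': '2114 Bigelow Ave',
    #                       'citystatezip': 'Seattle, WA'})
    #   response = client.request()  # a requests.Response object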
|
mit
| 1,373,440,640,780,670,200
| 23.977273
| 69
| 0.583258
| false
| 4.040441
| false
| false
| false
|
jeffreylu9/django-cms
|
cms/models/pagemodel.py
|
1
|
61624
|
# -*- coding: utf-8 -*-
from logging import getLogger
from os.path import join
from django.conf import settings
from django.contrib.auth import get_permission_codename
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import get_language, ugettext_lazy as _
from cms import constants
from cms.cache.page import set_xframe_cache, get_xframe_cache
from cms.constants import PUBLISHER_STATE_DEFAULT, PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DIRTY, TEMPLATE_INHERITANCE_MAGIC
from cms.exceptions import PublicIsUnmodifiable, LanguageError, PublicVersionNeeded
from cms.models.managers import PageManager, PagePermissionsPermissionManager
from cms.models.metaclasses import PageMetaClass
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.publisher.errors import PublisherCantPublish
from cms.utils import i18n, page as page_utils
from cms.utils.conf import get_cms_setting
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.helpers import reversion_register
from menus.menu_pool import menu_pool
from WLSite.galleries.models import Gallery, Tag
import django.forms as forms
from treebeard.mp_tree import MP_Node
logger = getLogger(__name__)
@python_2_unicode_compatible
class Page(six.with_metaclass(PageMetaClass, MP_Node)):
"""
A simple hierarchical page model
"""
LIMIT_VISIBILITY_IN_MENU_CHOICES = (
(1, _('for all users')),
(2, _('for anonymous users only')), # not used
(3, _('for teachers only')),
(4, _('for myself only')),
(5, _('for no one (delete page)')),
)
PUBLISHER_STATE_DEFAULT = 0
PUBLISHER_STATE_DIRTY = 1
PUBLISHER_STATE_DELETE = 2
# Page was marked published, but some of page parents are not.
PUBLISHER_STATE_PENDING = 4
TEMPLATE_DEFAULT = TEMPLATE_INHERITANCE_MAGIC if get_cms_setting('TEMPLATE_INHERITANCE') else get_cms_setting('TEMPLATES')[0][0]
X_FRAME_OPTIONS_INHERIT = 0
X_FRAME_OPTIONS_DENY = 1
X_FRAME_OPTIONS_SAMEORIGIN = 2
X_FRAME_OPTIONS_ALLOW = 3
X_FRAME_OPTIONS_CHOICES = (
(X_FRAME_OPTIONS_INHERIT, _('Inherit from parent page')),
(X_FRAME_OPTIONS_DENY, _('Deny')),
(X_FRAME_OPTIONS_SAMEORIGIN, _('Only this website')),
(X_FRAME_OPTIONS_ALLOW, _('Allow'))
)
template_choices = [(x, _(y)) for x, y in get_cms_setting('TEMPLATES')]
created_by = models.CharField(
_("created by"), max_length=constants.PAGE_USERNAME_MAX_LENGTH,
editable=False)
changed_by = models.CharField(
_("changed by"), max_length=constants.PAGE_USERNAME_MAX_LENGTH,
editable=False)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
creation_date = models.DateTimeField(auto_now_add=True)
changed_date = models.DateTimeField(auto_now=True)
publication_date = models.DateTimeField(_("publication date"), null=True, blank=True, help_text=_(
'When the page should go live. Status must be "Published" for page to go live.'), db_index=True)
publication_end_date = models.DateTimeField(_("publication end date"), null=True, blank=True,
help_text=_('When to expire the page. Leave empty to never expire.'),
db_index=True)
#
# Please use toggle_in_navigation() instead of affecting this property
# directly so that the cms page cache can be invalidated as appropriate.
#
in_navigation = models.BooleanField(_("in navigation"), default=True, db_index=True)
soft_root = models.BooleanField(_("soft root"), db_index=True, default=False,
help_text=_("All ancestors will not be displayed in the navigation"))
reverse_id = models.CharField(_("id"), max_length=40, db_index=True, blank=True, null=True, help_text=_(
"A unique identifier that is used with the page_url templatetag for linking to this page"))
navigation_extenders = models.CharField(_("attached menu"), max_length=80, db_index=True, blank=True, null=True)
published = models.BooleanField(_("is published"), default=False)
template = models.CharField(_("template"), max_length=100, choices=template_choices,
help_text=_('The template used to render the content.'),
default=TEMPLATE_DEFAULT)
site = models.ForeignKey(Site, help_text=_('The site the page is accessible at.'), verbose_name=_("site"),
related_name='djangocms_pages')
login_required = models.BooleanField(_("login required"), default=False)
limit_visibility_in_menu = models.SmallIntegerField(_("menu visibility"), default=1, null=True, blank=True,
choices=LIMIT_VISIBILITY_IN_MENU_CHOICES, db_index=True,
help_text=_("limit when this page is visible in the menu"))
is_home = models.BooleanField(editable=False, db_index=True, default=False)
application_urls = models.CharField(_('application'), max_length=200, blank=True, null=True, db_index=True)
application_namespace = models.CharField(_('application instance name'), max_length=200, blank=True, null=True)
# Placeholders (plugins)
placeholders = models.ManyToManyField(Placeholder, editable=False)
# Publisher fields
publisher_is_draft = models.BooleanField(default=True, editable=False, db_index=True)
# This is misnamed - the one-to-one relation is populated on both ends
publisher_public = models.OneToOneField('self', related_name='publisher_draft', null=True, editable=False)
languages = models.CharField(max_length=255, editable=False, blank=True, null=True)
# If the draft is loaded from a reversion version save the revision id here.
revision_id = models.PositiveIntegerField(default=0, editable=False)
# X Frame Options for clickjacking protection
xframe_options = models.IntegerField(
choices=X_FRAME_OPTIONS_CHOICES,
default=getattr(settings, 'CMS_DEFAULT_X_FRAME_OPTIONS', X_FRAME_OPTIONS_INHERIT)
)
# Managers
objects = PageManager()
permissions = PagePermissionsPermissionManager()
# Comment stuff
comments_disabled = models.BooleanField(default=False)
# Put pages in galleries
galleries = models.ManyToManyField(Gallery, related_name="gallery_resources", blank=True)
# Sum of ratings
score = models.IntegerField(default=0, blank=True, null=True)
# Number of people who rated project
numWhoRated = models.IntegerField(default=0, blank=True, null=True)
tags = models.ManyToManyField(Tag, related_name="tag_resource", blank=True)
class Meta:
permissions = (
('view_page', 'Can view page'),
('publish_page', 'Can publish page'),
('edit_static_placeholder', 'Can edit static placeholders'),
)
unique_together = (("publisher_is_draft", "site", "application_namespace"),
("reverse_id", "site", "publisher_is_draft"))
verbose_name = _('page')
verbose_name_plural = _('pages')
ordering = ('path',)
app_label = 'cms'
class PublisherMeta:
exclude_fields_append = ['id', 'publisher_is_draft', 'publisher_public',
'publisher_state', 'moderator_state',
'placeholders', 'lft', 'rght', 'tree_id',
'parent']
def validateTags(self, tags):
print "page validate tags"
if tags != '':
# split on commas or spaces
# tags_list = [ x.strip() for x in tags.replace(',', ' ').split() ]
tags_list = [ x.strip() for x in tags.split(',') ]
instance_list = []
for tag in tags_list:
instance = Tag.objects.get_or_create(title=tag)[0]
instance_list.append(instance)
return instance_list
else:
return []
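    # Illustrative behaviour: validateTags('math, science') returns the
    # Tag instances for 'math' and 'science', creating any that do not
    # yet exist; validateTags('') returns [].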
def get_class(self):
return "Resource"
def get_title(self):
return self.title_set.all()[0].title
def __unicode__(self):
title = self.get_menu_title(fallback=True)
if title is None:
title = u""
return force_text(title)
def __str__(self):
try:
title = self.get_menu_title(fallback=True)
except LanguageError:
try:
title = self.title_set.all()[0]
except IndexError:
title = None
return title
def __repr__(self):
# This is needed to solve the infinite recursion when
# adding new pages.
return object.__repr__(self)
def is_dirty(self, language):
state = self.get_publisher_state(language)
return state == PUBLISHER_STATE_DIRTY or state == PUBLISHER_STATE_PENDING
def get_absolute_url(self, language=None, fallback=True):
if not language:
language = get_language()
with i18n.force_language(language):
if self.is_home:
return reverse('pages-root')
# print "get path:", self.get_path(language, fallback)
# print "get slug:", self.get_slug(language, fallback)
path = self.get_path(language, fallback) or self.get_slug(language, fallback)
return reverse('pages-details-by-slug', kwargs={"slug": path})
def get_public_url(self, language=None, fallback=True):
"""
Returns the URL of the published version of the current page.
Returns empty string if the page is not published.
"""
try:
return self.get_public_object().get_absolute_url(language, fallback)
except:
return ''
def get_draft_url(self, language=None, fallback=True):
"""
Returns the URL of the draft version of the current page.
Returns empty string if the draft page is not available.
"""
try:
return self.get_draft_object().get_absolute_url(language, fallback)
        except Exception:
return ''
def move_page(self, target, position='first-child'):
"""
        Called from the admin interface when a page is moved. Should be used
        everywhere page position changes. Acts like an interface to mptt,
        firing the page_moved signal after the move is done.
        Note for issue #1166: URL conflicts are handled by the updated
        check_title_slugs; overwrite_url on the moved page doesn't need any
        check, as it stays the same regardless of the page's position in the
        tree.
"""
assert self.publisher_is_draft
# do not mark the page as dirty after page moves
self._publisher_keep_state = True
# readability counts :)
is_inherited_template = self.template == constants.TEMPLATE_INHERITANCE_MAGIC
# make sure move_page does not break when using INHERIT template
# and moving to a top level position
if position in ('left', 'right') and not target.parent and is_inherited_template:
self.template = self.get_template()
if target.publisher_public_id and position == 'right':
public = target.publisher_public
if target.get_root().get_next_sibling().pk == public.get_root().pk:
target = target.publisher_public
else:
logger.warning('tree may need rebuilding: run manage.py cms fix-tree')
if position == 'first-child' or position == 'last-child':
self.parent_id = target.pk
else:
self.parent_id = target.parent_id
self.save()
moved_page = self.move(target, pos=position)
# fire signal
import cms.signals as cms_signals
cms_signals.page_moved.send(sender=Page, instance=moved_page)
# check the slugs
page_utils.check_title_slugs(moved_page)
        # Make sure to update the slug and path of the target page.
page_utils.check_title_slugs(target)
if self.publisher_public_id:
# Ensure we have up to date mptt properties
public_page = Page.objects.get(pk=self.publisher_public_id)
# Ensure that the page is in the right position and save it
moved_page._publisher_save_public(public_page)
public_page = public_page.reload()
cms_signals.page_moved.send(sender=Page, instance=public_page)
page_utils.check_title_slugs(public_page)
from cms.cache import invalidate_cms_page_cache
invalidate_cms_page_cache()
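    # Illustrative usage from custom admin code ('parent_page' is
    # hypothetical): this fires the page_moved signal and re-checks title
    # slugs on both the moved page and the target.
    #
    #     >>> page.move_page(parent_page, position='first-child')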
def _copy_titles(self, target, language, published):
"""
Copy all the titles to a new page (which must have a pk).
:param target: The page where the new titles should be stored
"""
from .titlemodels import Title
old_titles = dict(target.title_set.filter(language=language).values_list('language', 'pk'))
for title in self.title_set.filter(language=language):
old_pk = title.pk
# If an old title exists, overwrite. Otherwise create new
title.pk = old_titles.pop(title.language, None)
title.page = target
title.publisher_is_draft = target.publisher_is_draft
title.publisher_public_id = old_pk
if published:
title.publisher_state = PUBLISHER_STATE_DEFAULT
else:
title.publisher_state = PUBLISHER_STATE_PENDING
title.published = published
title._publisher_keep_state = True
title.save()
old_title = Title.objects.get(pk=old_pk)
old_title.publisher_public = title
old_title.publisher_state = title.publisher_state
old_title.published = True
old_title._publisher_keep_state = True
old_title.save()
if hasattr(self, 'title_cache'):
self.title_cache[language] = old_title
if old_titles:
Title.objects.filter(id__in=old_titles.values()).delete()
def _copy_contents(self, target, language):
"""
Copy all the plugins to a new page.
:param target: The page where the new content should be stored
"""
# TODO: Make this into a "graceful" copy instead of deleting and overwriting
# copy the placeholders (and plugins on those placeholders!)
from cms.plugin_pool import plugin_pool
plugin_pool.set_plugin_meta()
for plugin in CMSPlugin.objects.filter(placeholder__page=target, language=language).order_by('-depth'):
inst, cls = plugin.get_plugin_instance()
if inst and getattr(inst, 'cmsplugin_ptr_id', False):
inst.cmsplugin_ptr = plugin
inst.cmsplugin_ptr._no_reorder = True
inst.delete(no_mp=True)
else:
plugin._no_reorder = True
plugin.delete(no_mp=True)
new_phs = []
target_phs = target.placeholders.all()
for ph in self.get_placeholders():
plugins = ph.get_plugins_list(language)
found = False
for target_ph in target_phs:
if target_ph.slot == ph.slot:
ph = target_ph
found = True
break
if not found:
ph.pk = None # make a new instance
ph.save()
new_phs.append(ph)
# update the page copy
if plugins:
copy_plugins_to(plugins, ph, no_signals=True)
target.placeholders.add(*new_phs)
def _copy_attributes(self, target, clean=False):
"""
Copy all page data to the target. This excludes parent and other values
that are specific to an exact instance.
:param target: The Page to copy the attributes to
"""
if not clean:
target.publication_date = self.publication_date
target.publication_end_date = self.publication_end_date
target.reverse_id = self.reverse_id
target.login_required = self.login_required
target.in_navigation = self.in_navigation
target.soft_root = self.soft_root
target.limit_visibility_in_menu = self.limit_visibility_in_menu
target.navigation_extenders = self.navigation_extenders
target.application_urls = self.application_urls
target.application_namespace = self.application_namespace
target.template = self.template
target.site_id = self.site_id
target.xframe_options = self.xframe_options
def copy_page(self, target, site, position='first-child',
copy_permissions=True):
"""
        Copy a page and all its descendants to a new location.
        This no longer checks add-page permissions; that is done in PageAdmin.
Note for issue #1166: when copying pages there is no need to check for
conflicting URLs as pages are copied unpublished.
"""
from cms.extensions import extension_pool
if not self.publisher_is_draft:
raise PublicIsUnmodifiable("copy page is not allowed for public pages")
pages = list(self.get_descendants(True).order_by('path'))
site_reverse_ids = Page.objects.filter(site=site, reverse_id__isnull=False).values_list('reverse_id', flat=True)
if target:
target.old_pk = -1
if position == "first-child" or position == "last-child":
tree = [target]
elif target.parent_id:
tree = [target.parent]
else:
tree = []
else:
tree = []
if tree:
tree[0].old_pk = tree[0].pk
first = True
first_page = None
# loop over all affected pages (self is included in descendants)
for page in pages:
titles = list(page.title_set.all())
# get all current placeholders (->plugins)
placeholders = list(page.get_placeholders())
origin_id = page.id
# create a copy of this page by setting pk = None (=new instance)
page.old_pk = old_pk = page.pk
page.pk = None
page.path = None
page.depth = None
page.numchild = 0
page.publisher_public_id = None
page.is_home = False
page.site = site
# only set reverse_id on standard copy
if page.reverse_id in site_reverse_ids:
page.reverse_id = None
if first:
first = False
if tree:
page.parent = tree[0]
else:
page.parent = None
page.save()
first_page = page
if target:
page = page.move(target, pos=position)
page.old_pk = old_pk
else:
count = 1
found = False
for prnt in tree:
if tree[0].pk == self.pk and page.parent_id == self.pk and count == 1:
count += 1
continue
elif prnt.old_pk == page.parent_id:
page.parent_id = prnt.pk
tree = tree[0:count]
found = True
break
count += 1
if not found:
page.parent = None
page.parent_id = None
page.save()
tree.append(page)
# copy permissions if necessary
if get_cms_setting('PERMISSION') and copy_permissions:
from cms.models.permissionmodels import PagePermission
for permission in PagePermission.objects.filter(page__id=origin_id):
permission.pk = None
permission.page = page
permission.save()
# copy titles of this page
draft_titles = {}
for title in titles:
title.pk = None # setting pk = None creates a new instance
title.page = page
if title.publisher_public_id:
draft_titles[title.publisher_public_id] = title
title.publisher_public = None
# create slug-copy for standard copy
title.published = False
title.slug = page_utils.get_available_slug(title)
title.save()
# copy the placeholders (and plugins on those placeholders!)
for ph in placeholders:
plugins = ph.get_plugins_list()
try:
ph = page.placeholders.get(slot=ph.slot)
except Placeholder.DoesNotExist:
ph.pk = None # make a new instance
ph.save()
page.placeholders.add(ph)
if plugins:
copy_plugins_to(plugins, ph)
extension_pool.copy_extensions(Page.objects.get(pk=origin_id), page)
# invalidate the menu for this site
menu_pool.clear(site_id=site.pk)
return first_page
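    # Illustrative usage ('target_page' and 'other_site' are hypothetical):
    # the copies are created unpublished, so URL conflicts need not be
    # checked here.
    #
    #     >>> new_root = page.copy_page(target_page, other_site,
    #     ...                           position='last-child',
    #     ...                           copy_permissions=False)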
def save(self, no_signals=False, commit=True, score=False, **kwargs):
"""
Args:
commit: True if model should be really saved
"""
        if self.score is None:
            self.score = 0
        if self.numWhoRated is None:
            self.numWhoRated = 0
        if self.limit_visibility_in_menu is None:
            self.limit_visibility_in_menu = 1
# delete template cache
if hasattr(self, '_template_cache'):
delattr(self, '_template_cache')
# Published pages should always have a publication date
# if the page is published we set the publish date if not set yet.
if self.publication_date is None and self.published:
self.publication_date = timezone.now() - timedelta(seconds=5)
if self.reverse_id == "":
self.reverse_id = None
if self.application_namespace == "":
self.application_namespace = None
from cms.utils.permissions import _thread_locals
user = getattr(_thread_locals, "user", None)
if user:
try:
changed_by = force_text(user)
except AttributeError:
# AnonymousUser may not have USERNAME_FIELD
changed_by = "anonymous"
else:
# limit changed_by and created_by to avoid problems with Custom User Model
if len(changed_by) > constants.PAGE_USERNAME_MAX_LENGTH:
changed_by = u'{0}... (id={1})'.format(
changed_by[:constants.PAGE_USERNAME_MAX_LENGTH - 15],
user.pk,
)
self.changed_by = changed_by
else:
self.changed_by = "script"
if not self.created_by:
self.created_by = self.changed_by
if commit:
if not self.depth:
if self.parent_id:
self.parent.add_child(instance=self)
else:
self.add_root(instance=self)
return #add_root and add_child save as well
super(Page, self).save(**kwargs)
def save_base(self, *args, **kwargs):
"""Overridden save_base. If an instance is draft, and was changed, mark
it as dirty.
Dirty flag is used for changed nodes identification when publish method
takes place. After current changes are published, state is set back to
PUBLISHER_STATE_DEFAULT (in publish method).
"""
keep_state = getattr(self, '_publisher_keep_state', None)
if self.publisher_is_draft and not keep_state and self.is_new_dirty():
self.title_set.all().update(publisher_state=PUBLISHER_STATE_DIRTY)
if keep_state:
delattr(self, '_publisher_keep_state')
return super(Page, self).save_base(*args, **kwargs)
def is_new_dirty(self):
if self.pk:
fields = [
'publication_date', 'publication_end_date', 'in_navigation', 'soft_root', 'reverse_id',
'navigation_extenders', 'template', 'login_required', 'limit_visibility_in_menu'
]
try:
old_page = Page.objects.get(pk=self.pk)
except Page.DoesNotExist:
return True
for field in fields:
old_val = getattr(old_page, field)
new_val = getattr(self, field)
if not old_val == new_val:
return True
return False
return True
def is_published(self, language, force_reload=False):
return self.get_title_obj(language, False, force_reload=force_reload).published
def toggle_in_navigation(self, set_to=None):
'''
Toggles (or sets) in_navigation and invalidates the cms page cache
'''
old = self.in_navigation
if set_to in [True, False]:
self.in_navigation = set_to
else:
self.in_navigation = not self.in_navigation
self.save()
#
# If there was a change, invalidate the cms page cache
#
if self.in_navigation != old:
from cms.cache import invalidate_cms_page_cache
invalidate_cms_page_cache()
return self.in_navigation
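    # Illustrative usage: the cms page cache is invalidated only when the
    # value actually changed.
    #
    #     >>> page.toggle_in_navigation()             # flip the current value
    #     >>> page.toggle_in_navigation(set_to=True)  # force it on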
def get_publisher_state(self, language, force_reload=False):
try:
return self.get_title_obj(language, False, force_reload=force_reload).publisher_state
except AttributeError:
return None
def set_publisher_state(self, language, state, published=None):
title = self.title_set.get(language=language)
title.publisher_state = state
if published is not None:
title.published = published
title._publisher_keep_state = True
title.save()
if hasattr(self, 'title_cache') and language in self.title_cache:
self.title_cache[language].publisher_state = state
return title
def publish(self, language):
"""Overrides Publisher method, because there may be some descendants, which
are waiting for parent to publish, so publish them if possible.
:returns: True if page was successfully published.
"""
# Publish can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be published. Use draft.')
# publish, but only if all parents are published!!
published = None
if not self.pk:
self.save()
# be sure we have the newest data including mptt
p = Page.objects.get(pk=self.pk)
self.path = p.path
self.depth = p.depth
self.numchild = p.numchild
if self._publisher_can_publish():
if self.publisher_public_id:
# Ensure we have up to date mptt properties
public_page = Page.objects.get(pk=self.publisher_public_id)
else:
public_page = Page(created_by=self.created_by)
if not self.publication_date:
self.publication_date = now()
self._copy_attributes(public_page)
            # we need to relate this new public copy to its draft page (self)
public_page.publisher_public = self
public_page.publisher_is_draft = False
# Ensure that the page is in the right position and save it
self._publisher_save_public(public_page)
public_page = public_page.reload()
published = public_page.parent_id is None or public_page.parent.is_published(language)
if not public_page.pk:
public_page.save()
# The target page now has a pk, so can be used as a target
self._copy_titles(public_page, language, published)
self._copy_contents(public_page, language)
# trigger home update
public_page.save()
# invalidate the menu for this site
menu_pool.clear(site_id=self.site_id)
self.publisher_public = public_page
published = True
else:
# Nothing left to do
pass
if not published:
self.set_publisher_state(language, PUBLISHER_STATE_PENDING, published=True)
self._publisher_keep_state = True
self.save()
# If we are publishing, this page might have become a "home" which
# would change the path
if self.is_home:
for title in self.title_set.all():
if title.path != '':
title._publisher_keep_state = True
title.save()
if not published:
# was not published, escape
return
# Check if there are some children which are waiting for parents to
# become published.
from cms.models import Title
publish_set = list(self.get_descendants().filter(title_set__published=True,
title_set__language=language).select_related('publisher_public', 'publisher_public__parent').order_by('depth', 'path'))
#prefetch the titles
publish_ids = {}
for page in publish_set:
publish_ids[page.pk] = None
if page.publisher_public_id:
publish_ids[page.publisher_public.pk] = None
titles = Title.objects.filter(page__pk__in=publish_ids.keys(), language=language)
for title in titles:
publish_ids[title.page_id] = title
for page in publish_set:
if page.pk in publish_ids and publish_ids[page.pk]:
page.title_cache = {}
page.title_cache[language] = publish_ids[page.pk]
if page.publisher_public_id:
if not page.publisher_public.parent_id:
page._publisher_save_public(page.publisher_public)
#query and caching optimization
if page.publisher_public.parent_id and not page.publisher_public.parent:
page.publisher_public.parent = Page.objects.get(pk=page.publisher_public.parent_id)
if page.publisher_public.parent_id in publish_ids:
page.publisher_public.parent.title_cache = {}
page.publisher_public.parent.title_cache[language] = publish_ids[page.publisher_public.parent_id]
if page.publisher_public.parent.is_published(language):
if page.publisher_public_id in publish_ids:
public_title = publish_ids[page.publisher_public_id]
else:
public_title = None
draft_title = publish_ids[page.pk]
if public_title and not public_title.published:
public_title._publisher_keep_state = True
public_title.published = True
public_title.publisher_state = PUBLISHER_STATE_DEFAULT
public_title.save()
if draft_title.publisher_state == PUBLISHER_STATE_PENDING:
draft_title.publisher_state = PUBLISHER_STATE_DEFAULT
draft_title._publisher_keep_state = True
draft_title.save()
elif page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
page.publish(language)
# fire signal after publishing is done
import cms.signals as cms_signals
cms_signals.post_publish.send(sender=Page, instance=self, language=language)
from cms.cache import invalidate_cms_page_cache
invalidate_cms_page_cache()
return published
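    # Illustrative publish round-trip on a draft page; both calls raise
    # PublicIsUnmodifiable when invoked on a public instance.
    #
    #     >>> draft = page.get_draft_object()
    #     >>> draft.publish('en')      # True once all parents are published
    #     >>> draft.unpublish('en')    # removes 'en' from the public site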
def unpublish(self, language):
"""
Removes this page from the public site
:returns: True if this page was successfully unpublished
"""
# Publish can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be unpublished. Use draft.')
# First, make sure we are in the correct state
title = self.title_set.get(language=language)
public_title = title.publisher_public
title.published = False
title.publisher_state = PUBLISHER_STATE_DIRTY
title.save()
if hasattr(self, 'title_cache'):
self.title_cache[language] = title
public_title.published = False
public_title.save()
public_page = self.publisher_public
public_placeholders = public_page.get_placeholders()
for pl in public_placeholders:
pl.cmsplugin_set.filter(language=language).delete()
public_page.save()
# trigger update home
self.save()
self.mark_descendants_pending(language)
from cms.cache import invalidate_cms_page_cache
invalidate_cms_page_cache()
from cms.signals import post_unpublish
post_unpublish.send(sender=Page, instance=self, language=language)
return True
def mark_descendants_pending(self, language):
assert self.publisher_is_draft
# Go through all children of our public instance
public_page = self.publisher_public
from cms.models import Title
if public_page:
descendants = public_page.get_descendants().filter(title_set__language=language)
for child in descendants:
try:
child.set_publisher_state(language, PUBLISHER_STATE_PENDING, published=False)
except Title.DoesNotExist:
continue
draft = child.publisher_public
if draft and draft.is_published(language) and draft.get_publisher_state(
language) == PUBLISHER_STATE_DEFAULT:
draft.set_publisher_state(language, PUBLISHER_STATE_PENDING)
def revert(self, language):
"""Revert the draft version to the same state as the public version
"""
# Revert can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be reverted. Use draft.')
if not self.publisher_public:
raise PublicVersionNeeded('A public version of this page is needed')
public = self.publisher_public
public._copy_titles(self, language, public.is_published(language))
public._copy_contents(self, language)
public._copy_attributes(self)
self.title_set.filter(language=language).update(publisher_state=PUBLISHER_STATE_DEFAULT, published=True)
self.revision_id = 0
self._publisher_keep_state = True
self.save()
def get_draft_object(self):
if not self.publisher_is_draft:
return self.publisher_draft
return self
def get_public_object(self):
if not self.publisher_is_draft:
return self
return self.publisher_public
def get_languages(self):
if self.languages:
return sorted(self.languages.split(','))
else:
return []
def get_descendants(self, include_self=False):
"""
:returns: A queryset of all the node's descendants as DFS, doesn't
include the node itself
"""
if include_self:
return self.__class__.get_tree(self).filter(site_id=self.site_id)
else:
return self.__class__.get_tree(self).exclude(pk=self.pk).filter(site_id=self.site_id)
def get_cached_ancestors(self):
if not hasattr(self, "ancestors_ascending"):
self.ancestors_ascending = list(self.get_ancestors())
return self.ancestors_ascending
def get_cached_descendants(self):
if not hasattr(self, "_cached_descendants"):
self._cached_descendants = list(self.get_descendants())
return self._cached_descendants
# ## Title object access
def get_title_obj(self, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for accessing wanted / current title.
If wanted title doesn't exists, EmptyTitle instance will be returned.
"""
language = self._get_title_cache(language, fallback, version_id, force_reload)
if language in self.title_cache:
return self.title_cache[language]
from cms.models.titlemodels import EmptyTitle
return EmptyTitle(language)
def get_title_obj_attribute(self, attrname, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for getting attribute or None from wanted/current title.
"""
try:
attribute = getattr(self.get_title_obj(
language, fallback, version_id, force_reload), attrname)
return attribute
except AttributeError:
return None
def get_path(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the path of the page depending on the given language
"""
return self.get_title_obj_attribute("path", language, fallback, version_id, force_reload)
def get_slug(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the slug of the page depending on the given language
"""
return self.get_title_obj_attribute("slug", language, fallback, version_id, force_reload)
def get_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the title of the page depending on the given language
"""
return self.get_title_obj_attribute("title", language, fallback, version_id, force_reload)
def get_menu_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the menu title of the page depending on the given language
"""
menu_title = self.get_title_obj_attribute("menu_title", language, fallback, version_id, force_reload)
if not menu_title:
return self.get_title(language, True, version_id, force_reload)
return menu_title
def get_placeholders(self):
if not hasattr(self, '_placeholder_cache'):
self._placeholder_cache = self.placeholders.all()
return self._placeholder_cache
def _validate_title(self, title):
from cms.models.titlemodels import EmptyTitle
if isinstance(title, EmptyTitle):
return False
if not title.title or not title.slug:
return False
return True
def get_admin_tree_title(self):
from cms.models.titlemodels import EmptyTitle
language = get_language()
if not hasattr(self, 'title_cache'):
self.title_cache = {}
for title in self.title_set.all():
self.title_cache[title.language] = title
if language not in self.title_cache or not self._validate_title(self.title_cache.get(language, EmptyTitle(language))):
fallback_langs = i18n.get_fallback_languages(language)
found = False
for lang in fallback_langs:
if lang in self.title_cache and self._validate_title(self.title_cache.get(lang, EmptyTitle(lang))):
found = True
language = lang
if not found:
language = None
for lang, item in self.title_cache.items():
if not isinstance(item, EmptyTitle):
language = lang
if not language:
return _("Empty")
title = self.title_cache[language]
if title.title:
return title.title
if title.page_title:
return title.page_title
if title.menu_title:
return title.menu_title
return title.slug
def get_changed_date(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get when this page was last updated
"""
return self.changed_date
def get_changed_by(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get user who last changed this page
"""
return self.changed_by
def get_page_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the page title of the page depending on the given language
"""
page_title = self.get_title_obj_attribute("page_title", language, fallback, version_id, force_reload)
if not page_title:
return self.get_title(language, True, version_id, force_reload)
return page_title
def get_meta_description(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get content for the description meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_description", language, fallback, version_id, force_reload)
def get_application_urls(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get application urls conf for application hook
"""
return self.application_urls
def get_redirect(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get redirect
"""
return self.get_title_obj_attribute("redirect", language, fallback, version_id, force_reload)
def _get_title_cache(self, language, fallback, version_id, force_reload):
if not language:
language = get_language()
load = False
if not hasattr(self, "title_cache") or force_reload:
load = True
self.title_cache = {}
elif language not in self.title_cache:
if fallback:
fallback_langs = i18n.get_fallback_languages(language)
for lang in fallback_langs:
if lang in self.title_cache:
return lang
load = True
if load:
from cms.models.titlemodels import Title
if version_id:
from reversion.models import Version
version = get_object_or_404(Version, pk=version_id)
revs = [related_version.object_version for related_version in version.revision.version_set.all()]
for rev in revs:
obj = rev.object
if obj.__class__ == Title:
self.title_cache[obj.language] = obj
else:
titles = Title.objects.filter(page=self)
for title in titles:
self.title_cache[title.language] = title
if language in self.title_cache:
return language
else:
if fallback:
fallback_langs = i18n.get_fallback_languages(language)
for lang in fallback_langs:
if lang in self.title_cache:
return lang
return language
def get_template(self):
"""
        Return the template of this page if defined, otherwise the template
        of the closest ancestor that defines one, falling back to the
        default template (the first entry of get_cms_setting('TEMPLATES')).
"""
if hasattr(self, '_template_cache'):
return self._template_cache
template = None
if self.template:
if self.template != constants.TEMPLATE_INHERITANCE_MAGIC:
template = self.template
else:
try:
template = self.get_ancestors().exclude(
template=constants.TEMPLATE_INHERITANCE_MAGIC).values_list('template', flat=True)[0]
except IndexError:
pass
if not template:
template = get_cms_setting('TEMPLATES')[0][0]
self._template_cache = template
return template
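    # Illustrative example, assuming CMS_TEMPLATES = [('base.html', 'Base'),
    # ('two_col.html', 'Two columns')]: a page whose template is the
    # inheritance magic value walks its ancestors for a concrete template
    # and falls back to 'base.html' when none of them define one.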
def get_template_name(self):
"""
get the textual name (2nd parameter in get_cms_setting('TEMPLATES'))
of the template of this page or of the nearest
ancestor. failing to find that, return the name of the default template.
"""
template = self.get_template()
for t in get_cms_setting('TEMPLATES'):
if t[0] == template:
return t[1]
return _("default")
def has_view_permission(self, request, user=None):
if not user:
user = request.user
from cms.utils.permissions import get_any_page_view_permissions, has_global_page_permission
can_see_unrestricted = get_cms_setting('PUBLIC_FOR') == 'all' or (
get_cms_setting('PUBLIC_FOR') == 'staff' and user.is_staff)
# inherited and direct view permissions
is_restricted = bool(get_any_page_view_permissions(request, self))
if not is_restricted and can_see_unrestricted:
return True
elif not user.is_authenticated():
return False
if not is_restricted:
# a global permission was given to the request's user
if has_global_page_permission(request, self.site_id, user=user, can_view=True):
return True
else:
# a specific permission was granted to the request's user
if self.get_draft_object().has_generic_permission(request, "view", user=user):
return True
# The user has a normal django permission to view pages globally
opts = self._meta
codename = '%s.view_%s' % (opts.app_label, opts.object_name.lower())
return request.user.has_perm(codename)
def has_change_permission(self, request, user=None):
opts = self._meta
if not user:
user = request.user
if user.is_superuser:
return True
return (user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts))
and self.has_generic_permission(request, "change"))
def has_delete_permission(self, request, user=None):
opts = self._meta
if not user:
user = request.user
if user.is_superuser:
return True
return (user.has_perm(opts.app_label + '.' + get_permission_codename('delete', opts))
and self.has_generic_permission(request, "delete"))
def has_publish_permission(self, request, user=None):
if not user:
user = request.user
if user.is_superuser:
return True
opts = self._meta
return (user.has_perm(opts.app_label + '.' + "publish_page")
and self.has_generic_permission(request, "publish"))
has_moderate_permission = has_publish_permission
def has_advanced_settings_permission(self, request, user=None):
return self.has_generic_permission(request, "advanced_settings", user)
def has_change_permissions_permission(self, request, user=None):
"""
Has user ability to change permissions for current page?
"""
return self.has_generic_permission(request, "change_permissions", user)
def has_add_permission(self, request, user=None):
"""
Has user ability to add page under current page?
"""
return self.has_generic_permission(request, "add", user)
def has_move_page_permission(self, request, user=None):
"""Has user ability to move current page?
"""
return self.has_generic_permission(request, "move_page", user)
def has_generic_permission(self, request, perm_type, user=None):
"""
Return true if the current user has permission on the page.
Return the string 'All' if the user has all rights.
"""
if not user:
user = request.user
att_name = "permission_%s_cache" % perm_type
if (not hasattr(self, "permission_user_cache")
or not hasattr(self, att_name)
or user.pk != self.permission_user_cache.pk):
from cms.utils.permissions import has_generic_permission
self.permission_user_cache = user
setattr(self, att_name, has_generic_permission(
self.pk, user, perm_type, self.site_id))
if getattr(self, att_name):
self.permission_edit_cache = True
return getattr(self, att_name)
def get_media_path(self, filename):
"""
Returns path (relative to MEDIA_ROOT/MEDIA_URL) to directory for storing
page-scope files. This allows multiple pages to contain files with
identical names without namespace issues. Plugins such as Picture can
use this method to initialise the 'upload_to' parameter for File-based
fields. For example:
image = models.ImageField(
_("image"), upload_to=CMSPlugin.get_media_path)
where CMSPlugin.get_media_path calls self.page.get_media_path
This location can be customised using the CMS_PAGE_MEDIA_PATH setting
"""
return join(get_cms_setting('PAGE_MEDIA_PATH'), "%d" % self.pk, filename)
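    # Illustrative example, assuming CMS_PAGE_MEDIA_PATH is
    # 'cms_page_media/': for a page with pk=42, get_media_path('logo.png')
    # returns 'cms_page_media/42/logo.png'.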
def reload(self):
"""
Reload a page from the database
"""
return Page.objects.get(pk=self.pk)
def get_object_queryset(self):
"""Returns smart queryset depending on object type - draft / public
"""
qs = self.__class__.objects
        # an if/else avoids evaluating the truthiness of a queryset (which
        # would hit the database and misbehave for empty results)
        return qs.drafts() if self.publisher_is_draft else qs.public().published()
def _publisher_can_publish(self):
"""Is parent of this object already published?
"""
if self.parent_id:
try:
return bool(self.parent.publisher_public_id)
except AttributeError:
raise PublisherCantPublish
return True
def get_previous_filtered_sibling(self, **filters):
filters.update({
'publisher_is_draft': self.publisher_is_draft
})
filters.update({
'site__id': self.site_id
})
try:
return self.get_siblings().filter(path__lt=self.path, **filters).reverse()[0]
except IndexError:
return None
def get_next_filtered_sibling(self, **filters):
filters.update({
'publisher_is_draft': self.publisher_is_draft
})
filters.update({
'site__id': self.site_id
})
try:
return self.get_siblings().filter(path__gt=self.path, **filters)[0]
except IndexError:
return None
def _publisher_save_public(self, obj):
"""Mptt specific stuff before the object can be saved, overrides
original publisher method.
Args:
obj - public variant of `self` to be saved.
"""
if self.parent_id and self.parent.publisher_public_id:
assert self.parent_id == self.parent.pk
public_parent = Page.objects.get(pk=self.parent.publisher_public_id)
else:
public_parent = None
filters = dict(publisher_public__isnull=False)
if public_parent:
filters['publisher_public__parent__in'] = [public_parent]
else:
filters['publisher_public__parent__isnull'] = True
prev_sibling = self.get_previous_filtered_sibling(**filters)
public_prev_sib = (prev_sibling.publisher_public if prev_sibling else None)
if not self.publisher_public_id: # first time published
# is there anybody on left side?
if not self.parent_id:
obj.parent_id = None
self.add_sibling(pos='right', instance=obj)
else:
if public_prev_sib:
obj.parent_id = public_prev_sib.parent_id
public_prev_sib.add_sibling(pos='right', instance=obj)
else:
if public_parent:
obj.parent_id = public_parent.pk
obj.parent = public_parent
obj = obj.add_root(instance=obj)
obj = obj.move(target=public_parent, pos='first-child')
else:
# check if object was moved / structural tree change
prev_public_sibling = obj.get_previous_filtered_sibling()
if self.depth != obj.depth or \
public_parent != obj.parent or \
public_prev_sib != prev_public_sibling:
if public_prev_sib:
obj.parent_id = public_prev_sib.parent_id
obj.save()
obj = obj.move(public_prev_sib, pos="right")
elif public_parent:
# move as a first child to parent
obj.parent_id = public_parent.pk
obj.save()
obj = obj.move(target=public_parent, pos='first-child')
else:
# it is a move from the right side or just save
next_sibling = self.get_next_filtered_sibling(**filters)
if next_sibling and next_sibling.publisher_public_id:
obj.parent_id = next_sibling.parent_id
obj.save()
obj = obj.move(next_sibling.publisher_public, pos="left")
else:
obj.save()
def move(self, target, pos=None):
super(Page, self).move(target, pos)
return self.reload()
def rescan_placeholders(self):
"""
Rescan and if necessary create placeholders in the current template.
"""
# inline import to prevent circular imports
from cms.utils.placeholder import get_placeholders
placeholders = get_placeholders(self.get_template())
found = {}
for placeholder in self.placeholders.all():
if placeholder.slot in placeholders:
found[placeholder.slot] = placeholder
for placeholder_name in placeholders:
if placeholder_name not in found:
placeholder = Placeholder.objects.create(slot=placeholder_name)
self.placeholders.add(placeholder)
found[placeholder_name] = placeholder
return found
def get_xframe_options(self):
""" Finds X_FRAME_OPTION from tree if inherited """
xframe_options = get_xframe_cache(self)
if xframe_options is None:
ancestors = self.get_ancestors()
# Ignore those pages which just inherit their value
ancestors = ancestors.exclude(xframe_options=self.X_FRAME_OPTIONS_INHERIT)
# Now just give me the clickjacking setting (not anything else)
xframe_options = list(ancestors.values_list('xframe_options', flat=True))
if self.xframe_options != self.X_FRAME_OPTIONS_INHERIT:
xframe_options.append(self.xframe_options)
if len(xframe_options) <= 0:
# No ancestors were found
return None
xframe_options = xframe_options[0]
set_xframe_cache(self, xframe_options)
return xframe_options
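    # Illustrative example: the resolved value is one of the integer
    # X_FRAME_OPTIONS_* choices (or None when the page and every ancestor
    # inherit), which middleware can translate into an X-Frame-Options
    # response header.
    #
    #     >>> page.get_xframe_options()  # e.g. X_FRAME_OPTIONS_SAMEORIGIN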
def undo(self):
"""
Revert the current page to the previous revision
"""
import reversion
# Get current reversion version by matching the reversion_id for the page
versions = reversion.get_for_object(self)
if self.revision_id:
current_revision = reversion.models.Revision.objects.get(pk=self.revision_id)
else:
try:
current_version = versions[0]
except IndexError as e:
e.message = "no current revision found"
raise
current_revision = current_version.revision
try:
previous_version = versions.filter(revision__pk__lt=current_revision.pk)[0]
except IndexError as e:
e.message = "no previous revision found"
raise
previous_revision = previous_version.revision
clean = self._apply_revision(previous_revision)
return Page.objects.get(pk=self.pk), clean
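    # Illustrative usage: stepping one revision back and forward again.
    # 'clean' is False when a reverted slug collided and had to be restored
    # from the pre-revert titles.
    #
    #     >>> reverted, clean = page.undo()
    #     >>> restored, clean = reverted.redo()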
def redo(self):
"""
Revert the current page to the next revision
"""
import reversion
# Get current reversion version by matching the reversion_id for the page
versions = reversion.get_for_object(self)
if self.revision_id:
current_revision = reversion.models.Revision.objects.get(pk=self.revision_id)
else:
try:
current_version = versions[0]
except IndexError as e:
e.message = "no current revision found"
raise
current_revision = current_version.revision
try:
previous_version = versions.filter(revision__pk__gt=current_revision.pk).order_by('pk')[0]
except IndexError as e:
e.message = "no next revision found"
raise
next_revision = previous_version.revision
clean = self._apply_revision(next_revision)
return Page.objects.get(pk=self.pk), clean
def _apply_revision(self, target_revision):
"""
Revert to a specific revision
"""
from cms.utils.page_resolver import is_valid_url
# Get current titles
old_titles = list(self.title_set.all())
# remove existing plugins / placeholders in the current page version
        # evaluate now -- the placeholders themselves are deleted below
        placeholder_ids = list(self.placeholders.all().values_list('pk', flat=True))
plugins = CMSPlugin.objects.filter(placeholder__in=placeholder_ids).order_by('-depth')
for plugin in plugins:
plugin._no_reorder = True
plugin.delete()
self.placeholders.all().delete()
# populate the page status data from the target version
target_revision.revert(True)
rev_page = get_object_or_404(Page, pk=self.pk)
rev_page.revision_id = target_revision.pk
rev_page.publisher_public_id = self.publisher_public_id
rev_page.save()
# cleanup placeholders
new_placeholders = rev_page.placeholders.all()
slots = {}
for new_ph in new_placeholders:
            if new_ph.slot not in slots:
                slots[new_ph.slot] = new_ph
            else:
                # compare primary keys -- placeholder_ids holds pks, not instances
                if new_ph.pk in placeholder_ids:
                    new_ph.delete()
                elif slots[new_ph.slot].pk in placeholder_ids:
                    slots[new_ph.slot].delete()
# check reverted titles for slug collisions
new_titles = rev_page.title_set.all()
clean = True
for title in new_titles:
try:
is_valid_url(title.path, rev_page)
except ValidationError:
for old_title in old_titles:
if old_title.language == title.language:
title.slug = old_title.slug
title.save()
clean = False
return clean
def _reversion():
exclude_fields = [
'publisher_is_draft',
'publisher_public',
'publisher_state',
]
reversion_register(
Page,
follow=["title_set", "placeholders", "pagepermission_set"],
exclude_fields=exclude_fields
)
_reversion()
class AddResourceToGalleryForm(forms.Form):
    # Form to add another user's project to one of your own galleries.
    # The inline "width: 40%" works around the select widget not growing
    # to fit longer gallery names.
    gallery = forms.ModelMultipleChoiceField(Gallery, None, required=False, widget=forms.SelectMultiple(attrs={"style": "width: 40%"}))
def __init__(self, user, resource, *args, **kwargs):
super(AddResourceToGalleryForm, self).__init__(*args, **kwargs)
self.user = user
        self.resource = resource
        # Only offer galleries the user can add to: exclude the default and
        # shared galleries as well as gtype 1 and 2.
        self.fields["gallery"].queryset = self.user.gallery_set.exclude(default=True).exclude(shared=True).exclude(gtype=1).exclude(gtype=2)
def save(self):
galleries = self.cleaned_data["gallery"]
print "galleries = ", galleries
if len(galleries)>0:
for g in galleries:
self.resource.galleries.add(g)
# Resource privacy settings don't depend on gallery, so we don't need to deal with that here
self.resource.save()
return self.resource
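# Illustrative usage in a view (names are hypothetical):
#
#     form = AddResourceToGalleryForm(request.user, resource, request.POST)
#     if form.is_valid():
#         form.save()  # adds the resource to every selected gallery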
license: bsd-3-clause

repo_name: huggingface/pytorch-transformers
path: src/transformers/models/roberta/modeling_roberta.py
copies: 1
size: 65213
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, gelu
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"roberta-large",
"roberta-large-mnli",
"distilroberta-base",
"roberta-base-openai-detector",
"roberta-large-openai-detector",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class RobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
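    # Illustrative example, assuming padding_idx == 1 (RoBERTa's default
    # pad_token_id): for inputs_embeds of shape (2, 4, hidden_size) the
    # generated ids are
    #
    #     tensor([[2, 3, 4, 5],
    #             [2, 3, 4, 5]])
    #
    # i.e. sequential positions starting just past the padding index.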
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
class RobertaSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
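    # Shape walk-through (illustrative, hidden_size=768 with 12 heads): an
    # input of shape (batch, seq, 768) is viewed as (batch, seq, 12, 64) and
    # permuted to (batch, 12, seq, 64) so attention scores can be computed
    # per head with a single batched matmul.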
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the
            # RobertaModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
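    # In short, the forward pass computes
    # softmax(Q K^T / sqrt(d_head) + mask) V per head -- optionally biased
    # by relative-position scores -- and merges the heads back into a
    # (batch, seq, all_head_size) tensor.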
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class RobertaSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
class RobertaAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = RobertaSelfAttention(config)
self.output = RobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class RobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class RobertaOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
class RobertaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = RobertaAttention(config)
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
class RobertaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class RobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class RobertaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
ROBERTA_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class RobertaModel(RobertaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See
        base class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. """, ROBERTA_START_DOCSTRING
)
class RobertaForCausalLM(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
            logger.warning("If you want to use `RobertaForCausalLM` as a standalone, add `is_decoder=True`.")
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100``
            are ignored (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
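# Illustrative sketch (not part of the original file): in `_reorder_cache`
# above, `index_select` along dim 0 re-aligns each cached key/value tensor
# with the beams that survived a beam-search step, e.g.
#
#   >>> past_state = torch.arange(6).view(3, 2)  # cache rows for 3 beams
#   >>> beam_idx = torch.tensor([2, 0, 0])       # beams kept after the step
#   >>> past_state.index_select(0, beam_idx)
#   tensor([[4, 5],
#           [0, 1],
#           [0, 1]])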
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<mask>",
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
@add_start_docstrings(
"""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
    Args:
        input_ids: torch.Tensor
        padding_idx: int
        past_key_values_length: int
    Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
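# Illustrative sketch (not part of the original file): with padding_idx = 1,
# the mask/cumsum arithmetic above assigns positions starting at
# padding_idx + 1 to real tokens and padding_idx itself to pad tokens:
#
#   >>> input_ids = torch.tensor([[5, 6, 1, 1]])  # 1 is the pad token
#   >>> create_position_ids_from_input_ids(input_ids, padding_idx=1)
#   tensor([[2, 3, 1, 1]])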
|
apache-2.0
| 8,092,571,453,882,208,000
| 41.875082
| 213
| 0.633601
| false
| 3.894709
| true
| false
| false
|
alimanfoo/numcodecs
|
numcodecs/bz2.py
|
1
|
1266
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import bz2 as _bz2
from numcodecs.abc import Codec
from numcodecs.compat import ndarray_copy, ensure_contiguous_ndarray
class BZ2(Codec):
"""Codec providing compression using bzip2 via the Python standard library.
Parameters
----------
level : int
Compression level.
"""
codec_id = 'bz2'
def __init__(self, level=1):
self.level = level
def encode(self, buf):
# normalise input
buf = ensure_contiguous_ndarray(buf)
# do compression
return _bz2.compress(buf, self.level)
# noinspection PyMethodMayBeStatic
def decode(self, buf, out=None):
# normalise inputs
buf = ensure_contiguous_ndarray(buf)
if out is not None:
out = ensure_contiguous_ndarray(out)
# N.B., bz2 cannot handle ndarray directly because of truth testing issues
buf = memoryview(buf)
# do decompression
dec = _bz2.decompress(buf)
# handle destination - Python standard library bz2 module does not
# support direct decompression into buffer, so we have to copy into
# out if given
return ndarray_copy(dec, out)
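# Minimal round-trip sketch (not part of the original module; assumes numpy
# and numcodecs are installed):
#
#   >>> import numpy as np
#   >>> codec = BZ2(level=5)
#   >>> data = np.arange(100, dtype='i4')
#   >>> enc = codec.encode(data)  # compressed bytes
#   >>> dec = codec.decode(enc)   # decompressed buffer
#   >>> np.array_equal(np.frombuffer(dec, dtype='i4'), data)
#   True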
|
mit
| -6,111,709,686,307,507,000
| 24.32
| 82
| 0.634281
| false
| 4.22
| false
| false
| false
|
tvm1/yamlif
|
yamlif.py
|
1
|
29488
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module loads a YAML configuration, presents a UI to the user and allows
them to set and save values to another YAML file.
"""
import sys
import os
import curses
import curses.textpad
import textwrap
import re
from editor import Editor
try:
import yaml
except ImportError:
    print(
        "This application requires the PyYAML module to work correctly. See: "
        "http://pyyaml.org")
quit(1)
def init_curses():
"""
This function sets up basic curses environment.
:return: Screen object.
"""
stdscr = curses.initscr()
maxy, maxx = stdscr.getmaxyx()
if maxy < 24 or maxx < 80:
print("Sorry, but at least 80x24 is needed.")
clean_curses()
quit(1)
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_RED, curses.COLOR_WHITE)
curses.init_pair(7, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.mousemask(1)
stdscr.clear()
stdscr.border()
stdscr.refresh()
stdscr.keypad(1)
return stdscr
def clean_curses():
"""
Cleans up curses after quit.
:return: None.
"""
curses.curs_set(1)
curses.nocbreak()
curses.endwin()
def draw_menu(screen, yamlobj, menu_titles, mtitle, msel):
"""
This function draws a menu with given title and handles the keyboard input.
:param screen: Screen object.
:param yamlobj: Python object ( nested list / dicts ).
:param menu_titles: List of menu titles.
:param mtitle: Title of currently active menu.
:param msel: Starting position of cursor in menu.
:return: Index of selected item.
"""
maxy, maxx = screen.getmaxyx()
screen.clear()
screen.border()
screen.refresh()
# calculate minimal menu height
if len(menu_titles) < maxy - 4:
size_y = len(menu_titles) + 2
else:
size_y = maxy - 4
# calculate minimal menu width to fit content and title
size_x = max(len(max(menu_titles, key=len)), len(mtitle)) + 2
# some titles are too large
if size_x > maxx - 4:
size_x = maxx - 4
# trim title if too long to fit
if len(mtitle) > size_x - 2:
mtitle = mtitle[0:size_x - 2]
# calculate position, so the menu is centered
pos_y = int(maxy / 2 - size_y / 2)
pos_x = int(maxx / 2 - size_x / 2)
screen.addstr(0, 2, 'ENTER/SPACE: Enter/edit | ESC/BACKSP: Exit | R: Run '
'commands | Q: Quit ', curses.color_pair(1))
# create actual window and border
win = curses.newwin(size_y, size_x, pos_y, pos_x)
win.attron(curses.A_BOLD)
win.border()
win.attroff(curses.A_BOLD)
# draw title
win.addstr(0, int(size_x / 2 - len(mtitle) / 2), mtitle)
# main loop that handles keyboard input and redrawing
while True:
lpos = 0
# we scrolled somewhere down
if msel > size_y - 3:
lpos = msel - size_y + 3
offset = lpos
# print the menu content
for i in range(1, size_y - 1):
mitem = menu_titles[lpos].ljust(size_x - 2)
if len(mitem) > size_x - 2:
mitem = mitem[0:size_x - 5] + "..."
if msel + 1 == i + offset:
win.addstr(i, 1, str(mitem), curses.color_pair(1))
else:
win.addstr(i, 1, str(mitem))
lpos += 1
win.refresh()
ckey = screen.getch()
# read keys and redraw, return item index on ENTER, return -1 on exit
if ckey == curses.KEY_UP:
if msel > 0:
msel -= 1
elif ckey == curses.KEY_DOWN:
if msel < len(menu_titles) - 1:
msel += 1
elif ckey == curses.KEY_ENTER or ckey == 10 or ckey == ord(" "):
del win
return msel
elif ckey == ord("R") or ckey == ord("r"):
run_commands(yamlobj)
elif ckey == ord("q") or ckey == ord("Q"):
clean_curses()
quit(0)
elif ckey == 27 or ckey == curses.KEY_BACKSPACE:
return -1
win.refresh()
del win
screen.touchwin()
screen.refresh()
def draw_page(screen, yamlobj, fn, obj, pid, ptitle, msel):
"""
    This function draws a page and its content.
:param screen: Curses screen object.
:param yamlobj: Whole python object ( nested list / dicts ).
:param fn: Filename of input file.
:param obj: Python object ( nested list / dicts ).
:param pid: Page id.
:param ptitle: Page title.
:param msel: Currently Highlighted item.
:return: Position of currently selected page element.
"""
maxy, maxx = screen.getmaxyx()
# something to begin with, fit at least page title
size_y = 2
size_x = len(ptitle) + 2
newelem = None
# determine page height and width
for i, elem in enumerate(obj):
if elem.get('value') is None:
value_length = 0
else:
value_length = len(str(elem.get('value')))
if 'checkbox' in elem:
size_y += 1
width = len(elem.get('title')) + 6
newelem = 'checkbox'
elif 'radio' in elem:
size_y += 1
width = len(elem.get('title')) + 6
newelem = 'radio'
elif 'textbox' in elem:
size_y += 1
width = len(elem.get('title')) + value_length + 4
if width > maxx:
width = maxx
newelem = 'textbox'
elif 'textarea' in elem:
size_y += 2
width = int(maxx / 2)
newelem = 'textarea'
elif 'textdisplay' in elem:
# wrapping is handled here
if len(elem.get('value')) > int(maxx / 2):
width = int(maxx / 2)
wrapped = textwrap.wrap(elem.get('value'), int(maxx / 2) - 2)
# if it's too long, we will truncate it to five lines
if len(wrapped) > 4:
size_y += 5
else:
size_y += len(wrapped)
else:
# it's only one line
width = len(elem.get('value')) + 2
size_y += 1
newelem = 'textdisplay'
# element has changed, add blank line
if elem != obj[-1]:
if newelem not in obj[i + 1]:
size_y += 1
# current element requires more space, allocate it
if width > size_x:
size_x = width
# bail out if page is too large (for now)
if size_y > maxy:
draw_popup(screen, 'Page is way too large to view.')
return -1
# page would be too wide
if size_x > maxx - 4:
size_x = maxx - 4
# calculate position, so the page is centered
pos_y = int(maxy / 2 - size_y / 2)
pos_x = int(maxx / 2 - size_x / 2)
# create actual window and border
win = curses.newwin(size_y, size_x, pos_y, pos_x)
win.attron(curses.A_BOLD)
win.border()
win.attroff(curses.A_BOLD)
# draw title
win.addstr(0, int(size_x / 2 - len(ptitle) / 2), ptitle)
# some help too
if size_x > 7:
win.addstr(size_y - 1, 2, 'S: Save', curses.color_pair(1))
newelem = None
offset = 1
# main loop that draws page
for i, elem in enumerate(obj):
# color for currently selected item
if i == msel:
cl = curses.color_pair(1)
else:
cl = curses.color_pair(0)
# this actually draws what is visible
if 'checkbox' in elem:
newelem = 'checkbox'
if elem.get('value', False) is True:
win.addstr(i + offset, 1,
'[*] ' + elem.get('title', '')[0:size_x - 6], cl)
else:
win.addstr(i + offset, 1,
'[ ] ' + elem.get('title', '')[0:size_x - 6], cl)
elif 'radio' in elem:
newelem = 'radio'
if elem.get('value', False) is True:
win.addstr(i + offset, 1,
'(*) ' + elem.get('title', '')[0:size_x - 6], cl)
else:
win.addstr(i + offset, 1,
'( ) ' + elem.get('title', '')[0:size_x - 6], cl)
elif 'textbox' in elem:
newelem = 'textbox'
# value and title might be too long
if len(str(elem.get('title'))) + len(
str(elem.get('value'))) + 4 <= size_x:
win.addstr(i + offset, 1, elem.get('title') + ": " + str(
elem.get('value', '')), cl)
else:
# so truncate it to fit the screen
spc = size_x - len(str(elem.get('title'))) - 4
# title is really long, truncate it
if spc <= 0:
tmptitle = elem.get('title')[0:int(size_x / 2)] + "..."
spc = size_x - len(tmptitle) - 4
else:
tmptitle = elem.get('title')
ln = str(elem.get('value', ' '))[0:spc]
ln = re.sub('...............$', '... [truncated]', ln)
win.addstr(i + offset, 1, tmptitle + ": " + str(ln), cl)
elif 'textarea' in elem:
newelem = 'textarea'
# title might be too long
tmptitle = str(elem.get('title', ''))[0:int(size_x / 2)]
# check if there's value at all, otherwise leave space blank
if len(elem.get('value', '')) == 0:
win.addstr(i + offset, 1, tmptitle + ": ", cl)
offset += 1
elif 'value' in elem:
textlist = elem.get('value', '').rstrip().split('\n')
for j, ln in enumerate(textlist):
ln = ln[0:size_x - 4 - len(tmptitle)]
if j == 0:
win.addstr(i + offset, 1, tmptitle + ": " + str(ln),
cl)
offset += 1
if j == 1:
if len(textlist) > 2:
ln = re.sub('.............$', '... [wrapped]', ln)
win.addstr(i + offset, 1 + len(tmptitle) + 2, str(ln),
cl)
break
elif 'textdisplay' in elem:
newelem = 'textdisplay'
# wrapping is handled here
textlist = textwrap.wrap(elem.get('value', ''), size_x - 2)
# print whatever is in content of textdisplay
for j, ln in enumerate(textlist):
# if it's too many lines, truncate
if j == 4 and len(textlist) > 4:
ln = re.sub('.............$', '... [wrapped]', ln)
win.addstr(i + offset, 1, str(ln), cl)
break
# print current line
win.addstr(i + offset, 1, str(ln), cl)
if j + 1 < len(textlist):
offset += 1
# element has changed, add blank line
if elem != obj[-1]:
if newelem not in obj[i + 1]:
offset += 1
win.attroff(curses.A_BOLD)
win.noutrefresh()
curses.doupdate()
ckey = screen.getch()
# read keys and update, edit value on ENTER, return -1 if leaving
if ckey == curses.KEY_UP:
if msel == 0:
msel = len(obj) - 1
else:
msel -= 1
elif ckey == curses.KEY_DOWN:
if msel == len(obj) - 1:
msel = 0
else:
msel += 1
elif ckey == curses.KEY_ENTER or ckey == 10 or ckey == ord(" "):
set_value(obj, msel, screen)
elif ckey == ord("s") or ckey == ord("S"):
exval, log = save_yaml(fn, yamlobj, pid, obj)
# print on_save log if available
if len(log) != 0:
draw_popup(screen, log)
# give user some feedback
if exval == 0:
draw_popup(screen, 'Data saved.')
else:
draw_popup(screen, 'Save failed.')
elif ckey == ord("q") or ckey == ord("Q"):
clean_curses()
quit(0)
elif ckey == 27 or ckey == curses.KEY_BACKSPACE:
msel = -1
del win
return msel
def draw_popup(screen, text='empty'):
"""
    Generic function that draws a popup window in the UI.
:param screen: Curses screen object.
:param text: Text to be displayed.
:return: None.
"""
maxy, maxx = screen.getmaxyx()
wrapped = []
# determine window size
if len(text) > maxx - 2:
# popup needs more than one line
size_x = int(maxx / 1.5) + 2
wrapped = textwrap.wrap(text, int(maxx / 1.5))
# try some reasonable window heights
if len(wrapped) + 2 > int(maxy / 1.5):
size_y = int(maxy / 1.5)
else:
size_y = len(wrapped) + 2
else:
# popup fits on one line
size_x = len(text) + 2
size_y = 3
# calculate position, so the popup is centered
pos_y = int(maxy / 2 - size_y / 2)
pos_x = int(maxx / 2 - size_x / 2)
# create actual window
win = curses.newwin(size_y, size_x, pos_y, pos_x)
start_pos = 0
while True:
# clear and redraw
win.clear()
# print text into window
if len(wrapped) > 0:
j = 0
for i in range(1, size_y - 1):
win.addstr(i, 1, str(wrapped[start_pos + j]))
j += 1
else:
win.addstr(1, 1, str(text))
win.attron(curses.A_BOLD)
win.border()
win.attroff(curses.A_BOLD)
if size_x >= 80:
win.addstr(0, 2,
' ARROWS: Up/down | ENTER/SPACE/BACKSPACE/ESC: Exit '
'view | Q: Quit ', curses.color_pair(1))
# display arrows, if scrollable
if start_pos != 0:
win.addstr(0, size_x - 7, '↑↑↑↑↑', curses.color_pair(1))
if start_pos + size_y - 2 < len(wrapped):
win.addstr(size_y - 1, size_x - 7, '↓↓↓↓↓', curses.color_pair(1))
win.refresh()
ckey = screen.getch()
# read keys scroll and redraw, handle exit
if ckey == curses.KEY_UP:
if start_pos > 0:
start_pos -= 1
if ckey == curses.KEY_DOWN:
if start_pos + size_y - 2 < len(wrapped):
start_pos += 1
if ckey == curses.KEY_ENTER or ckey == 10 or ckey == ord(" "):
break
if ckey == ord("q") or ckey == ord("Q"):
clean_curses()
quit(0)
if ckey == 27 or ckey == curses.KEY_BACKSPACE:
break
del win
screen.touchwin()
screen.refresh()
def draw_inputbox(screen, text='empty'):
"""
    Generic function that draws an input box in the UI.
    :param screen: Curses screen object.
    :param text: Text to be displayed.
    :return: Entered value.
"""
maxy, maxx = screen.getmaxyx()
if len(str(text)) > 64:
draw_popup(screen, 'Field contains invalid value.')
return None
# calculate position, so the inputbox is centered
size_x = int(67)
pos_y = int(maxy / 2 - 2)
pos_x = int(maxx / 2 - size_x / 2)
# create actual window and border
win = curses.newwin(3, size_x, pos_y, pos_x)
win.border()
win.addstr(0, 1, 'Please insert value (EMACS keys available):',
curses.color_pair(1))
win.refresh()
# derived subwindow
swin = win.derwin(1, size_x - 2, 1, 1)
curses.cbreak()
curses.curs_set(1)
screen.keypad(1)
# draw textpad and read value
tpad = curses.textpad.Textbox(swin)
swin.addstr(0, 0, str(text))
value = tpad.edit()
curses.curs_set(0)
del swin
del win
screen.touchwin()
screen.refresh()
return value.rstrip()
def draw_inputarea(screen, text='empty'):
"""
    Generic function that draws an 'editor' in the UI.
    :param screen: Curses screen object.
    :param text: Text to be displayed.
    :return: Entered value.
"""
maxy, maxx = screen.getmaxyx()
pos_y = int(4)
pos_x = int(4)
win = curses.newwin(maxy - 8, maxx - 8, pos_y, pos_x)
win.border()
win.refresh()
swin = win.derwin(maxy - 10, maxx - 10, 1, 1)
curses.cbreak()
curses.curs_set(1)
screen.keypad(1)
win.addstr(0, 1, 'EMACS-like keys available, CTRL-G to exit')
win.refresh()
tpad = curses.textpad.Textbox(swin)
swin.addstr(0, 0, str(text))
value = tpad.edit()
curses.curs_set(0)
del swin
del win
screen.touchwin()
screen.refresh()
return value.rstrip()
def open_yaml(yfile):
"""
    This function opens a file with YAML configuration.
:param yfile: Name of file.
:return: Python object ( nested lists / dicts ).
"""
with open(yfile, 'r') as stream:
        yamlobj = yaml.safe_load(stream)
return yamlobj
def load_service_functions(fn, globs):
"""
This function imports service functions if they are present.
:param fn: Filename of opened file.
:param globs: Caller's globals().
:return: 0 if success, else 1.
"""
fn = re.sub('.yaml$', '.py', fn)
if os.path.isfile(fn):
exec(compile(open(fn).read(), fn, 'exec'), globs)
return 0
else:
return 1
def run_commands(yamlobj):
"""
Runs commands stored in YAML.
:param yamlobj: Python object ( nested list / dicts ).
:return: None.
"""
# reset screen
clean_curses()
# run commands
commands = (yamlobj.get('commands'))
os.system(commands)
input("Press ENTER to continue ... ")
# reinit stuff back
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.mousemask(1)
def save_yaml(fn, yamlobj, pid, obj):
"""
This function saves values to YAML file.
:param fn: Filename of input file.
:param yamlobj: Whole Python object ( nested lists / dicts ).
:param pid: Page ID.
:param obj: Python object ( nested lists / dicts ).
    :return: Tuple of exit status and on_save log string.
"""
newobj = {}
if len(obj) == 0:
        # keep the (exit status, log) pair expected by the caller
        return 1, ""
# make up new name for _data file
    if re.match(r'^.*\.yaml$', fn):
# just so the source is *never* overwritten
fn += '_'
        fn = re.sub(r'\.yaml_$', '_data.yaml', fn)
else:
# filename was odd, so we just use something
fn += '.data'
# save only values/items that we want
for elem in obj:
if 'checkbox' in elem:
nkey = elem['checkbox']
nval = elem.get('value', "")
newobj[nkey] = nval
elif 'radio' in elem:
nkey = elem['radio']
nval = elem.get('value', "")
newobj[nkey] = nval
elif 'textbox' in elem:
nkey = elem['textbox']
nval = elem.get('value', "")
newobj[nkey] = nval
elif 'textarea' in elem:
nkey = elem['textarea']
nval = elem.get('value', "")
newobj[nkey] = nval
# fetch save function, if available
save_func = get_save_function(yamlobj, pid)
log = ""
# if the function is available, call it and pass the dict
if save_func in globals():
save_func += '(newobj)'
log = eval(save_func)
# reverse mapping back to UI
for key, val in newobj.items():
for elem in obj:
if key in elem.values():
elem['value'] = val
oldsave = {}
# if there's old save, load it
if os.path.isfile(fn):
with open(fn, 'r') as rstream:
            oldsave = yaml.safe_load(rstream)
# save file was empty for some reason
if oldsave is None:
oldsave = {}
oldsave[pid] = newobj
# save the modified object
with open(fn, 'w') as wstream:
yaml.dump(oldsave, wstream, default_flow_style=False)
return 0, log
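# Hypothetical sketch of the on_save hook contract used by save_yaml() above
# (the function and field names here are invented for illustration): a page
# declaring `on_save: validate_page` in the YAML expects a function of that
# name in the sibling .py file loaded by load_service_functions(); it receives
# the dict of saved values and returns a log string shown to the user.
#
#   def validate_page(newobj):
#       if not newobj.get('hostname'):
#           return 'warning: hostname is empty'
#       return ''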
def get_menulist(yamlobj, root=False):
"""
This function parses objects returned by get_menucontent() and prepares
input for draw_menu().
:param yamlobj: Python object ( nested list / dicts ).
:param root: True only if parsing YAML hierarchy from top.
:return: menu_ids - list of IDs, menu_titles - list of menu titles.
"""
menu_ids = []
menu_titles = []
if root is True:
for obj in yamlobj['content']:
if 'menu' in obj:
menu_ids.append(obj["menu"])
menu_titles.append(obj["title"])
elif 'page' in obj:
menu_ids.append(obj["page"])
menu_titles.append(obj["title"])
else:
for obj in yamlobj:
if 'menu' in obj:
menu_ids.append(obj["menu"])
menu_titles.append(obj["title"])
elif 'page' in obj:
menu_ids.append(obj["page"])
menu_titles.append(obj["title"])
return menu_ids, menu_titles
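# Hypothetical input shape for the two branches above (illustrative only, not
# taken from the source): the root document carries 'menu'/'title'/'content',
# and each nested entry is a dict keyed by either 'menu' or 'page':
#
#   menu: main
#   title: Main menu
#   content:
#     - page: network
#       title: Network settings
#       content: [...]
#     - menu: advanced
#       title: Advanced
#       content: [...]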
def get_nodetype(obj, objid):
"""
    Returns the key of the object with the given ID (e.g., menu, page, etc.).
:param obj: Structure containing YAML object ( nested lists / dicts ).
:param objid: YAML ID of given node.
:return: Key of given ID.
"""
result = None
if isinstance(obj, dict):
for key, val in obj.items():
if val == objid:
result = key
elif isinstance(val, list) or isinstance(val, dict):
retval = get_nodetype(val, objid)
if retval is not None:
result = retval
elif isinstance(obj, list):
for elem in obj:
if isinstance(elem, list) or isinstance(elem, dict):
retval = get_nodetype(elem, objid)
if retval is not None:
result = retval
return result
def get_title(obj, objid):
"""
    Returns the title value of the object with the given ID.
:param obj: Structure containing YAML object ( nested lists / dicts ).
:param objid: YAML ID of given node.
:return: Title of given ID.
"""
result = None
if isinstance(obj, dict):
for key, val in obj.items():
if val == objid:
result = obj['title']
elif isinstance(val, list) or isinstance(val, dict):
retval = get_title(val, objid)
if retval is not None:
result = retval
elif isinstance(obj, list):
for elem in obj:
if isinstance(elem, list) or isinstance(elem, dict):
retval = get_title(elem, objid)
if retval is not None:
result = retval
return result
def get_save_function(obj, objid):
"""
    Returns the on_save function name of the object with the given ID.
:param obj: Structure containing YAML object ( nested lists / dicts ).
:param objid: YAML ID of given page.
    :return: Name of the on_save function.
"""
result = None
if isinstance(obj, dict):
for key, val in obj.items():
if val == objid:
result = obj.get('on_save')
elif isinstance(val, list) or isinstance(val, dict):
retval = get_save_function(val, objid)
if retval is not None:
result = retval
elif isinstance(obj, list):
for elem in obj:
if isinstance(elem, list) or isinstance(elem, dict):
retval = get_save_function(elem, objid)
if retval is not None:
result = retval
return result
def get_objectcontent(obj, objid):
"""
    Returns the list / dictionary structure that is the content of the given YAML ID.
:param obj: Structure containing YAML object ( nested lists / dicts ).
:param objid: YAML ID of given node.
:return: Nested list / dictionary.
"""
result = None
if isinstance(obj, dict):
for key, val in obj.items():
if val == objid:
result = obj['content']
elif isinstance(val, list) or isinstance(val, dict):
retval = get_objectcontent(val, objid)
if retval is not None:
result = retval
elif isinstance(obj, list):
for elem in obj:
if isinstance(elem, list) or isinstance(elem, dict):
retval = get_objectcontent(elem, objid)
if retval is not None:
result = retval
return result
def set_value(obj, msel, screen):
"""
Changes value of given YAML object.
:param obj: Structure containing Python dictionary.
:param msel: Object index to modify.
:param screen: Screen object.
:return: None.
"""
# editor needs this
maxy, maxx = screen.getmaxyx()
# determine what object we try to change and act accordingly
if 'checkbox' in obj[msel]:
if obj[msel].get('value', False) is False:
obj[msel]['value'] = True
else:
obj[msel]['value'] = False
elif 'radio' in obj[msel]:
obj[msel]['value'] = True
i = msel + 1
# disable other adjacent radioboxes
while i < len(obj):
if 'radio' in obj[i]:
obj[i]['value'] = False
i += 1
else:
break
i = msel - 1
while i >= 0:
if 'radio' in obj[i]:
obj[i]['value'] = False
i -= 1
else:
break
elif 'textbox' in obj[msel]:
# if there's value, edit it
if 'value' in obj[msel]:
newval = draw_inputbox(screen, obj[msel]['value'])
obj[msel]['value'] = str(newval)
else:
newval = draw_inputbox(screen, '')
obj[msel]['value'] = str(newval)
elif 'textarea' in obj[msel]:
# if there's value, edit it
if 'value' in obj[msel]:
newval = Editor(screen,
title='Editing ' + obj[msel]['title'] + " ",
inittext=obj[msel]['value'], box=True,
win_size=(maxy - 6, maxx - 6),
win_location=(3, 3))()
obj[msel]['value'] = newval
else:
newval = Editor(screen,
title='Editing ' + obj[msel]['title'] + " ",
box=True,
win_size=(maxy - 6, maxx - 6),
win_location=(3, 3))()
obj[msel]['value'] = newval
# reset to previous state
curses.curs_set(0)
screen.clear()
screen.border()
screen.addstr(0, 2, 'ENTER/SPACE: Enter/edit | ESC/BACKSP: Exit | R: '
'Run commands | Q: Quit ', curses.color_pair(1))
screen.refresh()
elif 'textdisplay' in obj[msel]:
# open scrollable window
draw_popup(screen, obj[msel]['value'])
def main():
"""
Contains main loop that loads YAML, draws menu and decides what to do
with selected items.
:return: Exit value
"""
# fix the curses ESCAPE key delay
os.environ['ESCDELAY'] = '0'
if len(sys.argv) < 2:
print("Please provide a file!")
quit(1)
# start with first item selected
msel = 0
fn = sys.argv[1]
# open file & set up screen
yamlobj = open_yaml(fn)
# try to load service functions
load_service_functions(fn, globals())
# initialize curses
stdscr = init_curses()
# top menu defaults
mhist = []
mid = yamlobj['menu']
mtitle = yamlobj['title']
mhist.append(mid)
# get content for the first menu
menu_ids, menu_titles = get_menulist(yamlobj, True)
# main loop that draws menu and allows to traverse & open menu items
while True:
msel = draw_menu(stdscr, yamlobj, menu_titles, mtitle, msel)
        # ESC pressed: go back to the previous menu level
if msel == -1:
if len(mhist) > 1:
mhist.pop()
mid = mhist.pop()
else:
msel = 0
continue
else:
mid = menu_ids[msel]
eltype = get_nodetype(yamlobj, mid)
# we entered menu, append it to history
if eltype == 'menu':
mhist.append(mid)
# determine what we try to open and act accordingly
if eltype == 'page':
psel = 0
# don't leave page unless ESC is pressed
while psel != -1:
psel = draw_page(stdscr, yamlobj, fn,
get_objectcontent(yamlobj, mid), mid,
get_title(yamlobj, mid),
psel)
elif eltype == 'menu':
# entering new menu, get title and content
mtitle = get_title(yamlobj, mid)
menu_ids, menu_titles = get_menulist(
get_objectcontent(yamlobj, mid))
msel = 0
# quit
clean_curses()
exit(0)
if __name__ == '__main__':
main()
|
mit
| 326,057,240,882,830,200
| 26.905303
| 79
| 0.518087
| false
| 3.7104
| false
| false
| false
|
CaptainDesAstres/Frames-Animated-By-Curve
|
multi_track/TracksList.py
|
1
|
7394
|
import bpy
from functions import *
class Track(bpy.types.PropertyGroup):
'''object use to be listed as track in tracks list'''
def set_end_frame(self, context):
'''check that start and end frame are valid when
changing end frame settings'''
size = self.get(True).curve_to_frame.size
# check end isn't over clip size
if self.end > size:
self.end = size
# check start isn't over end
if self.start >= self.end:
if self.end > 1:
self['start'] = self.end - 1
else:
self['start'] = 1
self['end'] = 2
def set_start_frame(self, context):
'''check that start and end frame are valid
when changing start frame settings'''
# check start isn't under 0
if self.start < 1:
self.start = 1
# check start isn't over end
if self.start >= self.end:
size = self.get(True).curve_to_frame.size
if self.start < size:
self['end'] = self.start + 1
else:
self['start'] = size - 1
self['end'] = size
# all properties
name = bpy.props.StringProperty()
uid = bpy.props.StringProperty()
track_id = bpy.props.IntProperty()
start = bpy.props.IntProperty(
name = "First frame",
description = "First frame to take in count",
update = set_start_frame)
end = bpy.props.IntProperty(
name = "Last frame",
description = "Last frame to take in count",
update = set_end_frame)
followers = bpy.props.StringProperty(
name = "Random following track",
        description = "Which track can follow this track when the switch mode is «Randomly with succession rules». Specify integer values separated by «;»; an empty string means any track can follow.",
default = '')
def get( self, rename = False):
'''return the movie clip corresponding to this track'''
# get movieclip by name
try:
track = bpy.data.movieclips[ self.name ]
if track.curve_to_frame.uid == self.uid:
return track
except KeyError:
pass
# get it by uid in case name have been changed
for track in bpy.data.movieclips:
if track.curve_to_frame.uid == self.uid:
if rename:
# update with new name
try:
self.name = track.name
except AttributeError:
print('Track renaming error on '+self.name)
return track
        # if no corresponding movieclip is found
return None
def get_followers( self, list_size ):
'''return a list of possible followers'''
followers = []
strings = self.followers.split(';')
for fol in strings:
try:
fol = int(fol) % list_size
if fol not in followers:
followers.append( fol )
except ValueError:
pass
return followers
def get_frame( self, combination ):
'''Return frame number relative to combination value and start/end settings'''
return round( self.start + ( self.end - self.start ) * combination )
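# Illustrative sketch (hypothetical values, not part of the original
# add-on): mirrors Track.get_followers so the parsing of the
# «;»-separated string can be checked without a Blender session.
def _example_parse_followers(followers='2;5;x;2', list_size=4):
	'''Non-integer entries are skipped, indexes wrap modulo list_size
	and duplicates are dropped, so '2;5;x;2' with 4 tracks yields [2, 1].'''
	result = []
	for fol in followers.split(';'):
		try:
			fol = int(fol) % list_size
			if fol not in result:
				result.append(fol)
		except ValueError:
			pass
	return result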
class TrackItem(bpy.types.UIList):
'''Item to display tracks in a list template'''
def draw_item(
self,
context,
layout,
data,
item,
icon,
active_data,
active_propname,
index ):
'''draw item row'''
# display name and index in list
sp = layout.split(0.05)
col = sp.column()
col.label( str(item.track_id) )
sp = sp.split(0.75)
col = sp.column()
col.label(item.name, icon='CLIP')
col = sp.column()
col.label('['+str(item.start)+'-'+str(item.end)+']')
class TracksActions(bpy.types.Operator):
    '''Tracks list action operator'''
bl_idname = "curve_to_frame.tracks_list_action"
bl_label = "Track Action"
bl_description = "Track Action:\n\
- Move up selected track.\n\
- Check all Tracks.\n\
- Delete selected track.\n\
- Move down selected track."
bl_options = {'INTERNAL'}
action = bpy.props.EnumProperty(
items=(
('UP', "Up", ""),
('DOWN', "Down", ""),
('REMOVE', "Remove", ""),
('CHECK', "Check", ""),
)
)
def invoke(self, context, event):
scn = context.scene
i = scn.curve_to_frame.selected_track
try:
item = scn.curve_to_frame.tracks[i]
except IndexError:
self.report({'ERROR'}, 'Error: bad selection')
return {"CANCELLED"}
if self.action == 'DOWN':
# move selected track down
if( i < len(scn.curve_to_frame.tracks)-1 ):
scn.curve_to_frame.tracks.move( i, i+1 )
scn.curve_to_frame.selected_track = i+1
elif self.action == 'UP':
# move selected track up
if( i > 0 ):
scn.curve_to_frame.tracks.move( i, i-1 )
scn.curve_to_frame.selected_track = i-1
elif self.action == 'REMOVE':
# remove selected track
scn.curve_to_frame.tracks.remove(i)
length = len(scn.curve_to_frame.tracks)-1
if i > length:
scn.curve_to_frame.selected_track = length
elif self.action == 'CHECK':
# check if all tracks in the list are OK
index = -1
for key in scn.curve_to_frame.tracks.keys():
index += 1
# report and remove inexistant Track
track = scn.curve_to_frame.tracks[key].get( True)
if track is None:
                    self.report({'ERROR'}, 'Error: \''+key+'\' movieclip doesn\'t exist. The corresponding track has been removed.')
scn.curve_to_frame.tracks.remove(index)
index -= 1
continue
# report and remove Track which isn't SEQUENCE
if track.source != 'SEQUENCE':
                    self.report({'ERROR'}, 'Error: \''+key+'\' movieclip is not a sequence. The corresponding track has been removed.')
scn.curve_to_frame.tracks.remove(index)
index -= 1
continue
# initialize corresponding movieclip if necessary
if track.curve_to_frame.uid == '':
track.curve_to_frame.initialize()
if get_fcurve_by_data_path(track, 'curve_to_frame.peaks_shape') is None:
track.curve_to_frame.init_peaks_shape_curve()
# check all image of the sequence exist
if not track.curve_to_frame.check_image_file():
self.report({'ERROR'}, 'Error: some images source file of \''+key+'\' movieclip are missing.')
# update track id
index = -1
for key in scn.curve_to_frame.tracks.keys():
index +=1
scn.curve_to_frame.tracks[index].track_id = index
return {"FINISHED"}
class TracksList():
'''Tracks list properties and method'''
def add_track( self, context ):
'''add the selected tracks in tracks list'''
# get new track name and avoid recursive call
track = self.track_add
if track == '':
return
# get the corresponding movieclip
try:
track = bpy.data.movieclips[ track ]
except KeyError:
return
# check the source is compatible
if track.source != 'SEQUENCE':
return
# load tracks if necessary
if track.curve_to_frame.uid == '':
track.curve_to_frame.initialize()
if get_fcurve_by_data_path(track, 'curve_to_frame.peaks_shape') is None:
track.curve_to_frame.init_peaks_shape_curve()
# add to the list
new = self.tracks.add()
new.name = track.name
new.uid = track.curve_to_frame.uid
new.track_id = len(self.tracks)-1
new.start = 1
new.end = track.curve_to_frame.size
self.selected_track = new.track_id
# clear the add field
self.track_add=''
#########################
## list and properties ##
#########################
track_add = bpy.props.StringProperty(
name = "Add",
description = "Add tracks to the list",
default = '',
update = add_track )
tracks = bpy.props.CollectionProperty(
type=Track,
options = {'LIBRARY_EDITABLE'} )
selected_track = bpy.props.IntProperty( default = -1 )
|
gpl-3.0
| -3,735,279,142,487,749,000
| 23.389439
| 178
| 0.63613
| false
| 3.140671
| false
| false
| false
|
perkinslr/pyHorde3D
|
pyHorde3D/horde3d_h.py
|
1
|
1776
|
# horde3d_h.py
#
# Copyright 2014 Logan Perkins <perkins@lp-programming.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the Eclipse Public License 1.0
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
#
import os, subprocess
t=subprocess.Popen(['gcc','-E',"-I", os.environ.get('HORDE3DINCLUDE','/usr/local/include'), "%s/Horde3DUtils.h"%os.environ.get('HORDE3DINCLUDE','/usr/local/include'),'-DDLL='],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
t.wait()
data=t.stdout.readlines()
data=str.join('', [l for l in data if '#' not in l])
import re
structs=re.compile('struct ([a-zA-Z0-9]+).*?\n{*(\n*?.*?)*? enum [a-zA-Z]*.*?\n.*\n?([ 0-9a-zA-Z=,\n]+)?')
s={}
for struct in structs.findall(data):
attrs=struct[2].replace(',','').replace(' ','').strip().split('\n')
attrs1=[]
values=[]
for attr in attrs:
if '=' in attr:
attrs1.append(attr.split('=')[0])
values.append(int(attr.split('=')[1]))
else:
if not values:
values.append(0)
else:
values.append(values[-1]+1)
attrs1.append(attr)
values=iter(values)
d={a:values.next() for a in attrs1}
globals()[struct[0]]=type(struct[0],(),d)
s[struct[0]]=globals()[struct[0]]
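# Illustrative sketch (hypothetical enum body, not from the real Horde3D
# headers): mirrors the value-assignment loop above so its behaviour can
# be checked without gcc or the headers installed.
def _example_enum_values(attrs=('A=5', 'B', 'C=9', 'D')):
    """Entries with '=' take that value; bare entries continue counting
    from the previous one -> {'A': 5, 'B': 6, 'C': 9, 'D': 10}."""
    names, values = [], []
    for attr in attrs:
        if '=' in attr:
            names.append(attr.split('=')[0])
            values.append(int(attr.split('=')[1]))
        else:
            if not values:
                values.append(0)
            else:
                values.append(values[-1] + 1)
            names.append(attr)
    return dict(zip(names, values))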
import cffi
ffi=cffi.FFI()
ffi.cdef(structs.split(data)[0])
cdefs=structs.split(data)[-1].replace('''};
};
''','\n').replace('\n ','\n')
cdefs=re.sub(' [a-zA-Z0-9]+::[a-zA-Z0-9]+ ',' int ',cdefs)
ffi.cdef(cdefs)
def getfunctions(lib):
functions={}
for f in re.findall('([a-zA-Z][a-zA-Z0-9]*)\(',cdefs):
try:
functions[f]=getattr(lib,f)
except Exception as e:
print e
return functions
|
epl-1.0
| 541,706,671,798,995,260
| 24.73913
| 223
| 0.643018
| false
| 2.666667
| false
| false
| false
|
siliconsmiley/QGIS
|
python/plugins/processing/algs/otb/maintenance/parsing.py
|
1
|
6131
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
parsing.py
---------------------
Copyright : (C) 2013 by CS Systemes d'information (CS SI)
Email : otb at c-s dot fr (CS SI)
Contributors : Julien Malik (CS SI)
Oscar Picas (CS SI)
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Julien Malik, Oscar Picas'
__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
from collections import namedtuple
import re
def merge_pairs(list, should_merge, merge):
"""
Merges adjacent elements of list using the function merge
if they satisfy the predicate should_merge.
"""
ret = []
i = 0
while i < len(list) - 1:
a = list[i]
b = list[i + 1]
if should_merge(a, b):
ret.append(merge(a, b))
i += 2
else:
ret.append(a)
i += 1
if i == len(list) - 1:
ret.append(list[i])
return ret
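# Illustrative sketch (hypothetical data, not part of the original
# module): how merge_pairs collapses adjacent elements.
def _example_merge_pairs():
    """Adjacent equal numbers are summed, one pair at a time, so
    [1, 1, 2, 2, 5] becomes [2, 4, 5]."""
    return merge_pairs([1, 1, 2, 2, 5],
                       lambda a, b: a == b,
                       lambda a, b: a + b)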
QuotedString = namedtuple('QuotedString', 'contents comments')
_Arg = namedtuple('Arg', 'contents comments')
_Command = namedtuple('Command', 'name body comment')
BlankLine = namedtuple('BlankLine', '')
class File(list):
def __repr__(self):
return 'File(' + repr(list(self)) + ')'
class Comment(str):
def __repr__(self):
return 'Comment(' + unicode(self) + ')'
def Arg(contents, comments=None):
return _Arg(contents, comments or [])
def Command(name, body, comment=None):
return _Command(name, body, comment)
class CMakeParseError(Exception):
pass
def prettify(s):
"""
Returns the pretty-print of the contents of a CMakeLists file.
"""
return unicode(parse(s))
def parse(s):
'''
Parses a string s in CMakeLists format whose
contents are assumed to have come from the
file at the given path.
'''
nums_toks = tokenize(s)
nums_items = list(parse_file(nums_toks))
nums_items = attach_comments_to_commands(nums_items)
items = [item for _, item in nums_items]
return File(items)
def parse_file(toks):
'''
Yields line number ranges and top-level elements of the syntax tree for
a CMakeLists file, given a generator of tokens from the file.
toks must really be a generator, not a list, for this to work.
'''
prev_type = 'newline'
for line_num, (typ, tok_contents) in toks:
if typ == 'comment':
yield ([line_num], Comment(tok_contents))
elif typ == 'newline' and prev_type == 'newline':
yield ([line_num], BlankLine())
elif typ == 'word':
line_nums, cmd = parse_command(line_num, tok_contents, toks)
yield (line_nums, cmd)
prev_type = typ
def attach_comments_to_commands(nodes):
return merge_pairs(nodes, command_then_comment, attach_comment_to_command)
def command_then_comment(a, b):
line_nums_a, thing_a = a
line_nums_b, thing_b = b
return (isinstance(thing_a, _Command) and
isinstance(thing_b, Comment) and
set(line_nums_a).intersection(line_nums_b))
def attach_comment_to_command(lnums_command, lnums_comment):
command_lines, command = lnums_command
_, comment = lnums_comment
return command_lines, Command(command.name, command.body[:], comment)
def parse_command(start_line_num, command_name, toks):
cmd = Command(name=command_name, body=[], comment=None)
expect('left paren', toks)
for line_num, (typ, tok_contents) in toks:
if typ == 'right paren':
line_nums = range(start_line_num, line_num + 1)
return line_nums, cmd
elif typ == 'left paren':
raise ValueError('Unexpected left paren at line %s' % line_num)
elif typ in ('word', 'string'):
cmd.body.append(Arg(tok_contents, []))
elif typ == 'comment':
c = tok_contents
if cmd.body:
cmd.body[-1].comments.append(c)
            else:
                # _Command stores a single 'comment' field rather than a
                # list; attach a leading comment by rebuilding the namedtuple
                cmd = cmd._replace(comment=c)
msg = 'File ended while processing command "%s" started at line %s' % (
command_name, start_line_num)
raise CMakeParseError(msg)
def expect(expected_type, toks):
line_num, (typ, tok_contents) = toks.next()
if typ != expected_type:
msg = 'Expected a %s, but got "%s" at line %s' % (
expected_type, tok_contents, line_num)
raise CMakeParseError(msg)
# http://stackoverflow.com/questions/691148/pythonic-way-to-implement-a-tokenizer
scanner = re.Scanner([
(r'#.*', lambda scanner, token: ("comment", token)),
(r'"[^"]*"', lambda scanner, token: ("string", token)),
(r"\(", lambda scanner, token: ("left paren", token)),
(r"\)", lambda scanner, token: ("right paren", token)),
(r'[^ \t\r\n()#"]+', lambda scanner, token: ("word", token)),
(r'\n', lambda scanner, token: ("newline", token)),
(r"\s+", None), # skip other whitespace
])
def tokenize(s):
"""
Yields pairs of the form (line_num, (token_type, token_contents))
given a string containing the contents of a CMakeLists file.
"""
toks, remainder = scanner.scan(s)
line_num = 1
if remainder != '':
msg = 'Unrecognized tokens at line %s: %s' % (line_num, remainder)
raise ValueError(msg)
for tok_type, tok_contents in toks:
yield line_num, (tok_type, tok_contents.strip())
line_num += tok_contents.count('\n')
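# Illustrative sketch (hypothetical CMake snippet, not part of the
# original module): end-to-end use of tokenize/parse.
def _example_parse():
    """parse() turns 'add_executable(app main.c) # note' into a File
    holding one Command with two Args and its trailing Comment attached."""
    tree = parse('add_executable(app main.c) # note\n')
    cmd = tree[0]
    return cmd.name, [arg.contents for arg in cmd.body], cmd.comment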
|
gpl-2.0
| -1,898,056,095,687,286,800
| 35.064706
| 81
| 0.550481
| false
| 3.863264
| false
| false
| false
|
gem/sidd
|
sidd/operator/loaders/survey.py
|
1
|
17568
|
# Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
module contains classes for loading survey data in SQLite and CSV formats
"""
import csv
import sqlite3
import datetime
from os.path import exists
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsVectorFileWriter, QgsFeature, QgsField, QgsGeometry, QgsPoint
from utils.shapefile import load_shapefile_verify, remove_shapefile
from utils.system import get_unique_filename
from sidd.taxonomy.gem import GemTaxonomyAttribute
from sidd.constants import logAPICall, \
GID_FIELD_NAME, LON_FIELD_NAME, LAT_FIELD_NAME, TAX_FIELD_NAME, \
GRP_FIELD_NAME, AREA_FIELD_NAME, HT_FIELD_NAME, COST_FIELD_NAME
from sidd.operator import Operator,OperatorError, OperatorDataError
from sidd.operator.data import OperatorDataTypes
class GEMDBSurveyLoader(Operator):
    """ loading field survey data in GEMDB (SQLite) format"""
HT_ATTRIBUTE_NAME='Height'
YR_ATTRIBUTE_NAME='Date of Construction'
def __init__(self, options=None, name="Survey Loader"):
""" constructor """
Operator.__init__(self, options, name)
self._tmp_dir = options['tmp_dir']
self.taxonomy = options['taxonomy']
# check if height/year range is requested
# range is stored as dictionary {'min_values':min_values, 'max_values':max_values}
# where min_value and max_value are arrays of values
if options.has_key(self.HT_ATTRIBUTE_NAME):
ht_ranges = options[self.HT_ATTRIBUTE_NAME]
min_values_count = len(ht_ranges['min_values'])
max_values_count = len(ht_ranges['max_values'])
# use range only if it is correctly set
if min_values_count>0 and max_values_count>0 and min_values_count==max_values_count:
self.ht_ranges = options[self.HT_ATTRIBUTE_NAME]
if options.has_key(self.YR_ATTRIBUTE_NAME):
ht_ranges = options[self.YR_ATTRIBUTE_NAME]
min_values_count = len(ht_ranges['min_values'])
max_values_count = len(ht_ranges['max_values'])
# use range only if it is correctly set
if min_values_count>0 and max_values_count>0 and min_values_count==max_values_count:
self.yr_ranges = options[self.YR_ATTRIBUTE_NAME]
self._fields = {
0 : QgsField(GID_FIELD_NAME, QVariant.String),
1 : QgsField(LON_FIELD_NAME, QVariant.Double),
2 : QgsField(LAT_FIELD_NAME, QVariant.Double),
3 : QgsField(TAX_FIELD_NAME, QVariant.String, "", 255),
4 : QgsField(GRP_FIELD_NAME, QVariant.String),
5 : QgsField(AREA_FIELD_NAME, QVariant.String),
6 : QgsField(HT_FIELD_NAME, QVariant.String),
7 : QgsField(COST_FIELD_NAME, QVariant.String),
}
# self documenting method override
###########################
@property
def input_types(self):
return [OperatorDataTypes.File, OperatorDataTypes.StringAttribute, OperatorDataTypes.StringAttribute]
@property
def input_names(self):
return ["Survey Input File", "Survey data type", "Project Filter"]
input_descriptions = input_names
@property
def output_types(self):
return [OperatorDataTypes.Survey, OperatorDataTypes.Shapefile]
@property
def output_names(self):
return ["Survey", "Survey Shapefile"]
output_descriptions = output_names
# public method override
###########################
@logAPICall
def do_operation(self):
""" perform survey data loading """
# input/output data checking already done during property set
survey = self.inputs[0].value
project = self.inputs[2].value
tmp_survey_file = '%ssurvey_%s.shp' % (self._tmp_dir, get_unique_filename())
# load survey
try:
self._loadSurvey(survey, tmp_survey_file, project)
except Exception as err:
remove_shapefile(tmp_survey_file)
raise OperatorError("Error Loading Survey\n%s" % err,
self.__class__)
try:
# store loaded data
tmp_survey_layername = 'survey_%s' % get_unique_filename()
tmp_survey_layer = load_shapefile_verify(tmp_survey_file, tmp_survey_layername,
[self._lon_field, self._lat_field, self._tax_field])
except Exception as err:
raise OperatorError("Error Loading Survey\n%s" % err,
self.__class__)
self.outputs[0].value = tmp_survey_layer
self.outputs[1].value = tmp_survey_file
# protected method override
####################################
def _verify_inputs(self, inputs):
""" perform operator specific input validation """
if not exists(inputs[0].value):
raise OperatorDataError("input file %s does not exist" % (inputs[0].value))
def _verify_outputs(self, outputs):
""" perform operator specific input validation """
pass
# internal helper methods
####################################
def _loadSurvey(self, sqlitepath, shapefilepath, proj_uid=None):
# load data
sql = """select OBJ_UID, X, Y, SAMPLE_GRP, PLAN_AREA, REPLC_COST,
MAT_TYPE_L, MAT_TECH_L, MAS_REIN_L, MAS_MORT_L, STEELCON_L,
LLRS_L, LLRS_DCT_L,
ROOFSYSMAT, ROOFSYSTYP,
FLOOR_MAT, FLOOR_TYPE,
STORY_AG_Q, STORY_AG_1, STORY_AG_2,
YR_BUILT_Q, YR_BUILT_1, YR_BUILT_2,
STR_IRREG, STR_HZIR_P, STR_HZIR_S, STR_VEIR_P, STR_VEIR_S,
OCCUPCY, OCCUPCY_DT
from GEM_OBJECT o LEFT JOIN GED g on o.OBJ_UID=g.GEMOBJ_UID"""
        # SQL injection is not a real concern here: the input is a local SQLite file
if proj_uid is not None:
sql = "%s WHERE PROJ_UID='%s'" % (sql, proj_uid)
conn = sqlite3.connect(sqlitepath)
c = conn.cursor()
c.execute(sql)
self._buildSurveyLayer(c, shapefilepath)
c.close()
conn.close()
def _buildSurveyLayer(self, data, shapefilepath):
writer = QgsVectorFileWriter(shapefilepath, "utf-8", self._fields, QGis.WKBPoint, self._crs, "ESRI Shapefile")
f = QgsFeature()
for row in data:
obj_uid = str(row[0])
lon = self._tofloat(row[1])
lat = self._tofloat(row[2])
sample_grp = str(row[3])
plan_area = self._tofloat(row[4])
rep_cost = self._tofloat(row[5])
tax_string = self._make_gem_taxstring(row[6:])
ht = self._get_height(row[6:])
f.setGeometry(QgsGeometry.fromPoint(QgsPoint(lon, lat)))
f.addAttribute(0, QVariant(obj_uid))
f.addAttribute(1, QVariant(lon))
f.addAttribute(2, QVariant(lat))
f.addAttribute(3, QVariant(tax_string))
f.addAttribute(4, QVariant(sample_grp))
f.addAttribute(5, QVariant(plan_area))
f.addAttribute(6, QVariant(ht))
f.addAttribute(7, QVariant(rep_cost))
writer.addFeature(f)
del writer, f
def _make_gem_taxstring(self, data):
(mat_type_l, mat_tech_l, mas_rein_l, mas_mort_l, steel_con_l,
llrs_l, llrs_duct_l,
roofsysmat, roofsystyp,
floor_mat, floor_type,
story_ag_q, story_ag_1, story_ag_2,
yr_built_q, yr_built_1, yr_built_2,
str_irreg, str_hzir_p, str_hzir_s, str_veir_p, str_veir_s,
occupcy, occupcy_dt) = [(x) for x in data]
# attribute group names
# 'Material', 'Lateral Load-Resisting System', 'Roof', 'Floor', 'Height', 'Date of Construction', 'Irregularity', 'Occupancy'
# separator for individual attributes in group
separator = self.taxonomy.get_separator(self.taxonomy.Separators.Attribute)
# material
mat_string = self._coalesce(mat_type_l) \
+ self._append_not_null(mat_tech_l, separator) + self._append_not_null(mas_rein_l, separator) \
+ self._append_not_null(mas_mort_l, separator) + self._append_not_null(steel_con_l, separator)
# lateral load
ll_string = self._coalesce(llrs_l) + self._append_not_null(llrs_duct_l,separator)
# roof
roof_string = self._coalesce(roofsysmat) + self._append_not_null(roofsystyp,separator)
# floor
floor_string = self._coalesce(floor_mat) + self._append_not_null(floor_type,separator)
# story
attribute = self.taxonomy.get_attribute_by_name('Height')
_qualifier = self._coalesce(story_ag_q)
_story1, _story2 = self._toint(story_ag_1), self._toint(story_ag_2)
if getattr(self, 'ht_ranges', None) is None:
if _qualifier == 'HBET':
ht_string = attribute.make_string([_story2, _story1], GemTaxonomyAttribute.RANGE)
elif _qualifier == 'HAPP':
ht_string = attribute.make_string([_story2, 0], GemTaxonomyAttribute.APP)
else:
ht_string = attribute.make_string([_story1, 0], GemTaxonomyAttribute.EXACT)
else:
if _qualifier == "HBET":
ht_range = self._find_range((_story1 + _story2) / 2.0,
self.ht_ranges['min_values'], self.ht_ranges['max_values'])
else: # EXACT or APPROXIMATE
ht_range = self._find_range(_story1,
self.ht_ranges['min_values'], self.ht_ranges['max_values'])
if _story1 is None or _story1 == 0:
ht_range = [None, None]
elif ht_range[0] is None and ht_range[1] is not None:
self.ht_ranges['min_values'].insert(0, 1)
self.ht_ranges['max_values'].insert(0, ht_range[1])
elif ht_range[1] is None and ht_range[0] is not None:
self.ht_ranges['min_values'].append(ht_range[0])
self.ht_ranges['max_values'].append(200)
ht_string = attribute.make_string(ht_range, GemTaxonomyAttribute.RANGE)
# yr_built
attribute = self.taxonomy.get_attribute_by_name('Date of Construction')
_qualifier = self._coalesce(yr_built_q)
_year1, _year2 = self._toint(yr_built_1), self._toint(yr_built_2)
if getattr(self, 'yr_ranges', None) is None:
if _qualifier == 'YAPP':
yr_string = attribute.make_string([_year2, 0], GemTaxonomyAttribute.APP)
elif _qualifier== 'YPRE':
yr_string = attribute.make_string([_year2, 0], GemTaxonomyAttribute.PRE)
elif _qualifier == 'YBET':
yr_string = attribute.make_string([_year2, _year1], GemTaxonomyAttribute.RANGE)
else:
yr_string = attribute.make_string([_year1, 0], GemTaxonomyAttribute.EXACT)
else:
if _qualifier == "YBET":
yr_ranges = self._find_range((_year1 + _year2) / 2.0,
self.yr_ranges['min_values'], self.yr_ranges['max_values'])
else: # EXACT or APPROXIMATE
yr_ranges = self._find_range(_year1,
self.yr_ranges['min_values'], self.yr_ranges['max_values'])
if _year1 is None or _year1 == 0:
yr_ranges = [None, None]
elif yr_ranges[0] is None and yr_ranges[1] is not None:
self.yr_ranges['min_values'].insert(0, 1)
self.yr_ranges['max_values'].insert(0, yr_ranges[1])
elif yr_ranges[1] is None and yr_ranges[0] is not None:
self.yr_ranges['min_values'].append(yr_ranges[0])
self.yr_ranges['max_values'].append(datetime.date.today().year)
yr_string = attribute.make_string(yr_ranges, GemTaxonomyAttribute.RANGE)
# irregularity
ir_string = self._append_no_repeat([str_irreg, str_hzir_p, str_hzir_s, str_veir_p, str_veir_s],
separator, exclude="IRN")
# occupancy
occ_string = self._coalesce(occupcy) + self._append_not_null(occupcy_dt,separator)
# constructs output string
separator = self.taxonomy.get_separator(self.taxonomy.Separators.AttributeGroup)
return (mat_string + self._append_not_null(ll_string,separator)
+ self._append_not_null(roof_string,separator)
+ self._append_not_null(floor_string,separator)
+ self._append_not_null(ht_string,separator)
+ self._append_not_null(yr_string,separator)
+ self._append_not_null(ir_string,separator)
+ self._append_not_null(occ_string,separator))
def _get_height(self, data):
""" retrieve height as numeric value from SQLite Query Result """
story_ag_q, story_ag_1, story_ag_2 = data[11:14]
ht = 0
if story_ag_1 is None:
ht = 0
elif self._coalesce(story_ag_q) == "HBET":
ht = (self._toint(story_ag_1) + self._toint(story_ag_2)) / 2
else:
ht = self._toint(story_ag_1)
return int(ht)
def _coalesce(self, val):
""" returns val or blank string if val is null (None) """
if (val is not None):
return str(val).upper()
else:
return ""
def _toint(self, val):
""" convert val to integer, return 0 if conversion fails """
try:
return int(val)
except:
return 0
def _tofloat(self, val):
""" convert val to floating point, return 0.0 if conversion fails """
try:
return float(val)
except:
return 0.0
def _append_not_null(self, val, separator):
""" append val with separator if val is not empty """
if (val is None or val == ""):
return ""
else:
return separator + str(val)
def _append_no_repeat(self, vals, separator, exclude=''):
""" concatenate list of values using separator if value is not empty and not excluded """
no_repeat = {}
for val in vals:
if val is None or val == "" or val == exclude:
continue
no_repeat[val]=1
return str(separator).join(no_repeat.keys())
def _find_range(self, value, min_values, max_values):
""" find min/max values surrounding given value """
# less than minimum
if value < min_values[0]:
return None, min_values[0]
# test ranges
for min_val, max_val in map(None, min_values, max_values):
if value >= min_val and value <= max_val:
return min_val, max_val
# larger than maximum
return max_values[len(max_values)-1], None
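# Illustrative sketch (hypothetical ranges, not part of the original
# class): mirrors _find_range so its boundary behaviour is explicit.
def _example_find_range(value, min_values=(1, 4), max_values=(3, 7)):
    """_example_find_range(5) gives (4, 7); 0 is below every range and
    gives (None, 1); 9 is above every range and gives (7, None)."""
    if value < min_values[0]:
        return None, min_values[0]
    for min_val, max_val in zip(min_values, max_values):
        if min_val <= value <= max_val:
            return min_val, max_val
    return max_values[-1], None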
class CSVSurveyLoader(GEMDBSurveyLoader):
""" loading field survey data in CSV format"""
def __init__(self, options=None, name="Survey Loader"):
""" constructor """
super(CSVSurveyLoader, self).__init__(options, name)
    def _loadSurvey(self, csvpath, shapefilepath, proj_uid=None):
        # proj_uid is accepted to match the parent signature (called with
        # three arguments from do_operation) but is not used for CSV sources
# load data
data = csv.reader(open(csvpath, 'r'), delimiter=',', quotechar='"')
# skip header, there is probably a better way to accomplish this
data.next()
writer = QgsVectorFileWriter(shapefilepath, "utf-8", self._fields, QGis.WKBPoint, self._crs, "ESRI Shapefile")
f = QgsFeature()
gid = 0
for row in data:
lon = float(row[0])
lat = float(row[1])
f.setGeometry(QgsGeometry.fromPoint(QgsPoint(lon, lat)))
gid+=1
f.addAttribute(0, QVariant(gid))
f.addAttribute(1, QVariant(lon))
f.addAttribute(2, QVariant(lat))
f.addAttribute(3, QVariant(row[2]))
writer.addFeature(f)
del writer, f
|
agpl-3.0
| 2,007,593,762,775,590,700
| 44.007833
| 133
| 0.55288
| false
| 3.816641
| false
| false
| false
|
ajerneck/thatsfordinner
|
canonical.py
|
1
|
1153
|
"Use a canonical list of ingredients or foods to match."
import nltk
import pandas as pd
import functions as f
import common
## exploiting that ingredients are mentioned in instructions as well.
con = common.make_engine()
dfe = pd.read_sql_table('recipes_recipe', con)
x = dfe.head()
## intersection of ingredients and instructions.
set(x['ingredient_txt'].str.split()[0]).intersection(set(x['instruction_txt'].str.split()[0]))
## olive oil
## parsley
## lemon peel
## garlic
## lemon juice
## kosher salt
## black pepper
## spinach and artichoke ravioli
## baby artichokes
## canonical list attempt.
df = f.load_data()
## using canonical ingredient list.
cf = pd.read_csv('/home/alexander/start/archives/2015/2015-start/code/data-science/incubator-challenge/q3/fooddb/compounds_foods.csv', escapechar="\\")
vocab = cf['orig_food_common_name'].str.lower().unique()
## edit distances.
sents = df['ingredient_txt'].map(lambda x: map(nltk.word_tokenize, x.split('\n')))
sents = map(lambda x: x[1:-1], sents)
sents[0:10]
## simple approach: for the most probable words for each topic, if a unigram appears in a bigram, filter it out.
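## Illustrative sketch of the approach above (hypothetical input, not
## part of the original analysis):
def filter_unigrams_in_bigrams(words):
    """Drop any unigram that also appears inside one of the bigrams,
    e.g. ['olive', 'olive oil', 'salt'] -> ['olive oil', 'salt']."""
    bigrams = [w for w in words if ' ' in w]
    return [w for w in words
            if ' ' in w or not any(w in b.split() for b in bigrams)]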
|
bsd-3-clause
| 7,886,874,209,994,783,000
| 22.06
| 151
| 0.718127
| false
| 2.889724
| false
| false
| false
|
yaybu/touchdown
|
touchdown/aws/apigateway/method_response.py
|
1
|
2446
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument, resource, serializers
from touchdown.core.plan import Plan
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy
from .resource import Resource
class MethodResponse(resource.Resource):
resource_name = "method_response"
name = argument.String(field="httpMethod")
status_code = argument.String(field="statusCode")
response_parameters = argument.Dict(field="responseParameters")
response_models = argument.Dict(field="responseModels")
resource = argument.Resource(Resource, field="resourceId")
class Describe(SimpleDescribe, Plan):
resource = MethodResponse
service_name = "apigateway"
api_version = "2015-07-09"
describe_action = "get_method_response"
describe_notfound_exception = "NotFoundException"
describe_envelope = "[@]"
key = "httpMethod"
def get_describe_filters(self):
api = self.runner.get_plan(self.resource.resource.api)
if not api.resource_id:
return None
resource = self.runner.get_plan(self.resource.resource)
if not resource.resource_id:
return None
return {
"restApiId": api.resource_id,
"resourceId": resource.resource_id,
"httpMethod": self.resource.name,
"statusCode": self.resource.status_code,
}
class Apply(SimpleApply, Describe):
create_action = "put_method_response"
create_envelope = "@"
def get_create_serializer(self):
return serializers.Resource(restApiId=self.resource.resource.api.identifier())
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_method_response"
def get_destroy_serializer(self):
return serializers.Dict(
restApiId=self.resource.resource.api.identifier(),
resourceId=self.resource.identifier(),
)
|
apache-2.0
| 5,690,618,798,359,818,000
| 30.766234
| 86
| 0.701962
| false
| 4.117845
| false
| false
| false
|
js850/pele
|
examples/new_potential/mypotential.py
|
1
|
3624
|
"""
an example of how to create a new potential.
"""
from pele.potentials import BasePotential
class MyPot(BasePotential):
"""a Lennard Jones potential with altered exponents
V(r) = 4. * (r**-24 - r**-12)
"""
def __init__(self, natoms):
self.natoms = natoms #number of atoms
def getEnergy(self, coords):
coords = np.reshape(coords, [self.natoms,3])
E = 0.
for i in range(self.natoms):
for j in range(i):
r = np.sqrt(np.sum((coords[i,:] - coords[j,:])**2))
E += 4. * (r**-24 - r**-12)
return E
def getEnergyGradient(self, coords):
coords = np.reshape(coords, [self.natoms,3])
E = 0.
grad = np.zeros(coords.shape)
for i in range(self.natoms):
for j in range(i):
dr = coords[i,:] - coords[j,:]
r = np.sqrt(np.sum(dr**2))
E += 4. * (r**(-24) - r**(-12))
g = 4. * ( 24. * r**(-25) - 12. * r**(-13))
grad[i,:] += -g * dr/r
grad[j,:] += g * dr/r
return E, grad.reshape(-1)
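# Illustrative sketch (not part of the original example): for this
# altered potential the pair minimum sits at r = 2**(1./12) with
# V(r) = -1, which a two-atom energy evaluation reproduces.
def _example_two_atoms():
    pot = MyPot(2)
    r = 2 ** (1. / 12)
    coords = np.array([0., 0., 0., r, 0., 0.])
    energy = pot.getEnergy(coords)
    assert abs(energy + 1.0) < 1e-10
    return energy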
from pele.systems import BaseSystem
from pele.mindist import MinPermDistAtomicCluster, ExactMatchAtomicCluster
from pele.transition_states import orthogopt
class MySystem(BaseSystem):
def __init__(self, natoms):
super(MySystem, self).__init__()
self.natoms = natoms
        self.params.database.accuracy = 0.1
def get_potential(self):
return MyPot(self.natoms)
def get_mindist(self):
permlist = [range(self.natoms)]
return MinPermDistAtomicCluster(permlist=permlist, niter=10)
def get_orthogonalize_to_zero_eigenvectors(self):
return orthogopt
def get_compare_exact(self, **kwargs):
permlist = [range(self.natoms)]
return ExactMatchAtomicCluster(permlist=permlist, **kwargs)
import numpy as np
def run_basinhopping():
natoms = 8
system = MySystem(natoms)
database = system.create_database()
x0 = np.random.uniform(-1,1,[natoms*3])
bh = system.get_basinhopping(database=database, coords=x0)
bh.run(10)
print "found", len(database.minima()), "minima"
min0 = database.minima()[0]
print "lowest minimum found has energy", min0.energy
return system, database
def run_double_ended_connect(system, database):
# connect the all minima to the lowest minimum
from pele.landscape import ConnectManager
manager = ConnectManager(database, strategy="gmin")
for i in xrange(database.number_of_minima()-1):
min1, min2 = manager.get_connect_job()
connect = system.get_double_ended_connect(min1, min2, database)
connect.connect()
from pele.utils.disconnectivity_graph import DisconnectivityGraph, database2graph
import matplotlib.pyplot as plt
def make_disconnectivity_graph(database):
graph = database2graph(database)
dg = DisconnectivityGraph(graph, nlevels=3, center_gmin=True)
dg.calculate()
dg.plot()
plt.show()
def test_potential():
import numpy as np
natoms = 5
pot = MyPot(natoms)
coords = np.random.uniform(-1,1,natoms*3)
e = pot.getEnergy(coords)
print e
e, g = pot.getEnergyGradient(coords)
print e
gnum = pot.NumericalDerivative(coords, eps=1e-6)
print np.max(np.abs(gnum-g)), np.max(np.abs(gnum))
print np.max(np.abs(gnum-g)) / np.max(np.abs(gnum))
if __name__ == "__main__":
#test_potential()
mysys, database = run_basinhopping()
run_double_ended_connect(mysys, database)
make_disconnectivity_graph(database)
|
gpl-3.0
| -772,127,674,280,667,800
| 30.789474
| 81
| 0.613962
| false
| 3.340092
| false
| false
| false
|
liampauling/flumine
|
examples/middleware/orders.py
|
1
|
3222
|
import logging
from flumine import config
from flumine.utils import STRATEGY_NAME_HASH_LENGTH
from flumine.markets.middleware import Middleware
from flumine.order.trade import Trade
logger = logging.getLogger(__name__)
class OrdersMiddleware(Middleware):
"""
Middleware to add execution complete orders
to the blotter. This is required on a restart
as the order stream does not include
EXECUTION_COMPLETE orders
"""
def __init__(self, flumine):
self.flumine = flumine
def add_market(self, market) -> None:
resp = self.flumine.client.betting_client.betting.list_current_orders(
customer_strategy_refs=[config.hostname],
order_projection="EXECUTION_COMPLETE",
)
for current_order in resp.orders:
logger.info(
"OrdersMiddleware: Processing order {0}".format(current_order.bet_id),
extra={
"bet_id": current_order.bet_id,
"market_id": current_order.market_id,
"customer_strategy_ref": current_order.customer_strategy_ref,
"customer_order_ref": current_order.customer_order_ref,
},
)
order = self._create_order_from_current(current_order, market)
if order:
order.update_current_order(current_order)
order.execution_complete()
def _create_order_from_current(self, current_order, market):
strategy_name_hash = current_order.customer_order_ref[
:STRATEGY_NAME_HASH_LENGTH
]
order_id = current_order.customer_order_ref[STRATEGY_NAME_HASH_LENGTH + 1 :]
# get strategy
strategy = self.flumine.strategies.hashes.get(strategy_name_hash)
if strategy is None:
logger.warning(
"OrdersMiddleware: Strategy not available to create order {0}".format(
order_id
),
extra={
"bet_id": current_order.bet_id,
"market_id": current_order.market_id,
"customer_strategy_ref": current_order.customer_strategy_ref,
"customer_order_ref": current_order.customer_order_ref,
"strategy_name": str(strategy),
},
)
return
# add trade/order
trade = Trade(
market.market_id,
current_order.selection_id,
current_order.handicap,
strategy,
)
order = trade.create_order_from_current(current_order, order_id)
market.blotter[order.id] = order
runner_context = strategy.get_runner_context(*order.lookup)
runner_context.place(trade.id)
logger.info(
"OrdersMiddleware: New order trade created",
extra={
"bet_id": current_order.bet_id,
"market_id": current_order.market_id,
"customer_strategy_ref": current_order.customer_strategy_ref,
"customer_order_ref": current_order.customer_order_ref,
"strategy_name": str(strategy),
},
)
return order
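# Illustrative sketch (hypothetical ref, not part of the original
# middleware): a customer_order_ref has the form
# '<strategy name hash>-<order id>', which is why the middleware slices
# it with STRATEGY_NAME_HASH_LENGTH above.
def _example_split_order_ref(ref=None):
    if ref is None:
        ref = "a" * STRATEGY_NAME_HASH_LENGTH + "-" + "order123"
    strategy_name_hash = ref[:STRATEGY_NAME_HASH_LENGTH]
    order_id = ref[STRATEGY_NAME_HASH_LENGTH + 1:]
    return strategy_name_hash, order_id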
|
mit
| -2,638,615,105,914,977,300
| 37.819277
| 86
| 0.574798
| false
| 4.200782
| false
| false
| false
|
boegel/easybuild-easyblocks
|
easybuild/easyblocks/generic/rubygem.py
|
1
|
4937
|
##
# Copyright 2015-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Ruby Gems, implemented as an easyblock
@author: Robert Schmidt (Ottawa Hospital Research Institute)
@author: Kenneth Hoste (Ghent University)
"""
import os
import easybuild.tools.environment as env
from easybuild.framework.easyconfig import CUSTOM
from easybuild.framework.extensioneasyblock import ExtensionEasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import copy_file
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class RubyGem(ExtensionEasyBlock):
"""Builds and installs Ruby Gems."""
@staticmethod
def extra_options(extra_vars=None):
"""Extra easyconfig parameters specific to RubyGem easyblock."""
extra_vars = ExtensionEasyBlock.extra_options(extra_vars)
extra_vars.update({
'gem_file': [None, "Path to gem file in unpacked sources", CUSTOM],
})
return extra_vars
def __init__(self, *args, **kwargs):
"""RubyGem easyblock constructor."""
super(RubyGem, self).__init__(*args, **kwargs)
self.ext_src = None
def run(self):
"""Perform the actual Ruby gem build/install"""
if not self.src:
raise EasyBuildError("No source found for Ruby Gem %s, required for installation.", self.name)
super(RubyGem, self).run()
self.ext_src = self.src
self.log.debug("Installing Ruby gem %s version %s." % (self.name, self.version))
self.install_step()
def extract_step(self):
"""Skip extraction of .gem files, which are installed as downloaded"""
if len(self.src) > 1:
raise EasyBuildError("Don't know how to handle Ruby gems with multiple sources.")
else:
src = self.src[0]
if src['path'].endswith('.gem'):
copy_file(src['path'], self.builddir)
self.ext_src = src['name']
# set final path since it can't be determined from unpacked sources (used for guessing start_dir)
src['finalpath'] = self.builddir
else:
# unpack zipped gems, use specified path to gem file
super(RubyGem, self).extract_step()
if self.cfg['gem_file']:
self.ext_src = os.path.join(src['finalpath'], self.cfg['gem_file'])
if not os.path.exists(self.ext_src):
raise EasyBuildError("Gem file not found at %s", self.ext_src)
else:
raise EasyBuildError("Location to gem file in unpacked sources must be specified via gem_file")
def configure_step(self):
"""No separate configuration for Ruby Gems."""
pass
def build_step(self):
"""No separate build procedure for Ruby Gems."""
pass
def test_step(self):
"""No separate (standard) test procedure for Ruby Gems."""
pass
def install_step(self):
"""Install Ruby Gems using gem package manager"""
ruby_root = get_software_root('Ruby')
if not ruby_root:
raise EasyBuildError("Ruby module not loaded?")
# this is the 'proper' way to specify a custom installation prefix: set $GEM_HOME
if not self.is_extension or self.master.name != 'Ruby':
env.setvar('GEM_HOME', self.installdir)
bindir = os.path.join(self.installdir, 'bin')
run_cmd("gem install --bindir %s --local %s" % (bindir, self.ext_src))
def make_module_extra(self):
"""Extend $GEM_PATH in module file."""
txt = super(RubyGem, self).make_module_extra()
# for stand-alone Ruby gem installs, $GEM_PATH needs to be updated
if not self.is_extension or self.master.name != 'Ruby':
txt += self.module_generator.prepend_paths('GEM_PATH', [''])
return txt
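# A minimal, hypothetical easyconfig sketch that would use this easyblock;
# every field value below is illustrative, not taken from the original
# source.
_EXAMPLE_EASYCONFIG = """
easyblock = 'RubyGem'
name = 'ffi'
version = '1.9.10'
homepage = 'https://github.com/ffi/ffi'
description = "Ruby FFI gem"
toolchain = {'name': 'GCCcore', 'version': '8.3.0'}
source_urls = ['https://rubygems.org/downloads/']
sources = ['%(name)s-%(version)s.gem']
dependencies = [('Ruby', '2.7.2')]
"""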
|
gpl-2.0
| 2,007,238,328,556,488,700
| 38.814516
| 115
| 0.648167
| false
| 3.760091
| false
| false
| false
|
sukenda/django-api-tutorial
|
blog/settings.py
|
1
|
5512
|
"""
Django settings for blog project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# BASE_DIR = "/Users/jmitch/desktop/blog/src/"
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sm@g)(fbwdh5wc*xe@j++m9rh^uza5se9a57c5ptwkg*b@ki0x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third party
'markdown_deux',
'pagedown',
'rest_framework',
# local apps
'comments',
'posts',
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
LOGIN_URL = "/login/"
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'api_sandbox',
        'OPTIONS': {
            'charset': 'utf8mb4',
            'sql_mode': 'traditional',
            # Django ignores a bare 'init_command' key outside OPTIONS, so
            # default_storage_engine is set here instead
            'init_command': "SET sql_mode='STRICT_TRANS_TABLES', default_storage_engine=INNODB",
},
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
# '/var/www/static/',
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_cdn")
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
# 'DEFAULT_PARSER_CLASSES': (
# 'rest_framework.parsers.JSONParser',
# )
"DEFAULT_AUTHENTICATION_CLASSES": (
# 'rest_framework.authentication.SessionAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
# 'rest_framework.authentication.BasicAuthentication'
),
"DEFAULT_PERMISSION_CLASSES": (
'rest_framework.permissions.IsAuthenticated',
# 'rest_framework.permissions.IsAuthenticatedOrReadOnly',
)
}
'''
curl -X POST -d "username=cfe&password=learncode" http://127.0.0.1:8000/api/auth/token/
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImNmZSIsInVzZXJfaWQiOjEsImVtYWlsIjoiIiwiZXhwIjoxNDYxOTY1ODI5fQ.OTX7CZFZqxhaUnU9Da13Ebh9FY_bHMeCF1ypr9hXjWw
curl -H "Authorization: JWT eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImNmZSIsInVzZXJfaWQiOjEsImVtYWlsIjoiIiwiZXhwIjoxNDYxOTY1ODI5fQ.OTX7CZFZqxhaUnU9Da13Ebh9FY_bHMeCF1ypr9hXjWw
" http://127.0.0.1:8000/api/comments/
curl -X POST -H "Authorization: JWT eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6ImNmZSIsInVzZXJfaWQiOjEsImVtYWlsIjoiIiwiZXhwIjoxNDYxOTY2MTc4fQ._i5wEqJ_OO8wNiVVNAWNPGjGaO7OzChY0UzONgw06D0" -H "Content-Type: application/json" -d '{"content":"some reply to another try"}' 'http://127.0.0.1:8000/api/comments/create/?slug=new-title&type=post&parent_id=13'
curl http://127.0.0.1:8000/api/comments/
'''
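# Illustrative sketch (hypothetical host and credentials, not part of the
# original settings): the same token flow as the curl notes above, via
# the requests library.
def _example_jwt_flow():
    import requests
    resp = requests.post('http://127.0.0.1:8000/api/auth/token/',
                         data={'username': 'cfe', 'password': 'learncode'})
    token = resp.json()['token']
    return requests.get('http://127.0.0.1:8000/api/comments/',
                        headers={'Authorization': 'JWT %s' % token})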
|
mit
| 5,082,875,815,669,808,000
| 28.956522
| 360
| 0.695573
| false
| 3.036915
| false
| false
| false
|
vianuevm/cppStyle
|
comment_checks.py
|
1
|
2368
|
import re
from pyparsing import Word, Literal, alphanums
def check_line_width(self, line):
max_length = self.max_line_length
current_length = len(line)
if current_length > max_length:
self.add_error(label="LINE_WIDTH", data={'length': current_length})
def check_missing_rme(self, lines):
function = Word(alphanums + '_')
function_syntax = function + Literal('(')
parsed = function_syntax.searchString(lines[self.current_line_num]).asList()
function_name = parsed[0][0]
function_signature = lines[self.current_line_num].strip().replace(';','').strip()
if function_name != 'main':
requires = effects = modifies = False
#Check if there's a complete RME in the last 10 lines
start = self.current_line_num - 10
if start < 0:
start = 0
for line_num in range(start, self.current_line_num):
code = lines[line_num].lower()
if re.search('requires', code): requires = True
if re.search('effects', code): effects = True
if re.search('modifies', code): modifies = True
# If it's not there, maybe they defined it in a header file.
if not (requires and effects and modifies) and (function_signature not in self.all_rme[self.current_file]):
# error only in this case
# prevent double-counting
if function_signature not in self.missing_rme[self.current_file]:
self.add_error("MISSING_RME", data={'function': function_name, 'function_signature': function_signature})
self.missing_rme[self.current_file].add(function_signature)
elif function_signature not in self.all_rme[self.current_file]:
self.all_rme[self.current_file].add(function_signature)
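# Illustrative sketch (hypothetical line of C++, not part of the original
# checker): shows what the pyparsing pattern in check_missing_rme
# extracts -- the identifier directly before '('.
def _example_function_match(line='int getWidth(int x);'):
    function = Word(alphanums + '_')
    parsed = (function + Literal('(')).searchString(line).asList()
    return parsed[0][0]  # -> 'getWidth'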
def check_min_comments(self, all_lines, clean_lines):
num_lines = len(all_lines) + 1
num_comments = 0
blank_lines_at_end = 0
for index, line in enumerate(all_lines):
if line != clean_lines[index]:
num_comments += 1
if line[0] == u'\n':
blank_lines_at_end += 1
else:
blank_lines_at_end = 0
num_lines -= (blank_lines_at_end + 1)
if num_comments < num_lines * self.min_comments_ratio:
self.add_error(label='MIN_COMMENTS', line=0, type="WARNING", data={'comments': num_comments, 'lines': num_lines})
|
mit
| 1,191,410,062,008,988,000
| 45.45098
| 121
| 0.625845
| false
| 3.688474
| false
| false
| false
|
hyc/HyperDex
|
test/python/RegexSearch.py
|
1
|
2310
|
#!/usr/bin/env python
import sys
import hyperdex.client
from hyperdex.client import LessEqual, GreaterEqual, Range, Regex, LengthEquals, LengthLessEqual, LengthGreaterEqual
c = hyperdex.client.Client(sys.argv[1], int(sys.argv[2]))
def to_objectset(xs):
return set([frozenset(x.items()) for x in xs])
assert c.put('kv', 'foo/foo/foo', {}) == True
assert c.put('kv', 'foo/foo/bar', {}) == True
assert c.put('kv', 'foo/foo/baz', {}) == True
assert c.put('kv', 'foo/bar/foo', {}) == True
assert c.put('kv', 'foo/bar/bar', {}) == True
assert c.put('kv', 'foo/bar/baz', {}) == True
assert c.put('kv', 'foo/baz/foo', {}) == True
assert c.put('kv', 'foo/baz/bar', {}) == True
assert c.put('kv', 'foo/baz/baz', {}) == True
assert c.put('kv', 'bar/foo/foo', {}) == True
assert c.put('kv', 'bar/foo/bar', {}) == True
assert c.put('kv', 'bar/foo/baz', {}) == True
assert c.put('kv', 'bar/bar/foo', {}) == True
assert c.put('kv', 'bar/bar/bar', {}) == True
assert c.put('kv', 'bar/bar/baz', {}) == True
assert c.put('kv', 'bar/baz/foo', {}) == True
assert c.put('kv', 'bar/baz/bar', {}) == True
assert c.put('kv', 'bar/baz/baz', {}) == True
assert c.put('kv', 'baz/foo/foo', {}) == True
assert c.put('kv', 'baz/foo/bar', {}) == True
assert c.put('kv', 'baz/foo/baz', {}) == True
assert c.put('kv', 'baz/bar/foo', {}) == True
assert c.put('kv', 'baz/bar/bar', {}) == True
assert c.put('kv', 'baz/bar/baz', {}) == True
assert c.put('kv', 'baz/baz/foo', {}) == True
assert c.put('kv', 'baz/baz/bar', {}) == True
assert c.put('kv', 'baz/baz/baz', {}) == True
assert to_objectset(c.search('kv', {'k': Regex('^foo')})) == to_objectset([{'k': 'foo/foo/foo'}, {'k': 'foo/foo/bar'}, {'k': 'foo/foo/baz'}, {'k': 'foo/bar/foo'}, {'k': 'foo/bar/bar'}, {'k': 'foo/bar/baz'}, {'k': 'foo/baz/foo'}, {'k': 'foo/baz/bar'}, {'k': 'foo/baz/baz'}])
assert to_objectset(c.search('kv', {'k': Regex('foo$')})) == to_objectset([{'k': 'foo/foo/foo'}, {'k': 'foo/bar/foo'}, {'k': 'foo/baz/foo'}, {'k': 'bar/foo/foo'}, {'k': 'bar/bar/foo'}, {'k': 'bar/baz/foo'}, {'k': 'baz/foo/foo'}, {'k': 'baz/bar/foo'}, {'k': 'baz/baz/foo'}])
assert to_objectset(c.search('kv', {'k': Regex('^b.*/foo/.*$')})) == to_objectset([{'k': 'bar/foo/foo'}, {'k': 'bar/foo/bar'}, {'k': 'bar/foo/baz'}, {'k': 'baz/foo/foo'}, {'k': 'baz/foo/bar'}, {'k': 'baz/foo/baz'}])
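# Illustrative sketch (assumes the same populated 'kv' space): the length
# predicates imported above follow the same search pattern; every key
# here is exactly 11 characters, so this should match all 27 objects.
assert len(list(c.search('kv', {'k': LengthEquals(11)}))) == 27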
|
bsd-3-clause
| 2,271,529,638,827,483,000
| 61.432432
| 273
| 0.55671
| false
| 2.541254
| false
| false
| false
|
GoranLundberg/RPLCD
|
RPLCD/i2c.py
|
1
|
11253
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013-2017 Danilo Bargen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from smbus import SMBus
from . import common as c
from .lcd import BaseCharLCD
# PCF8574 backlight control
PCF8574_BACKLIGHT = 0x08
PCF8574_NOBACKLIGHT = 0x00
# PCF8574 Pin bitmasks
PCF8574_E = 0x4
PIN_READ_WRITE = 0x2 # Not used?
PIN_REGISTER_SELECT = 0x1 # Not used?
# MCP230XX backlight control
MCP230XX_BACKLIGHT = 0x80
MCP230XX_NOBACKLIGHT = 0x7f
# MCP230XX pin bitmasks and datamask
MCP230XX_RS = 0x02
MCP230XX_E = 0x4
MCP230XX_DATAMASK = 0x78
MCP230XX_DATASHIFT = 3
# MCP23008 Register addresses
MCP23008_IODIR = 0x00
MCP23008_GPIO = 0x09
# MCP23017 Register addresses
MCP23017_IODIRA = 0x00
MCP23017_IODIRB = 0x01
MCP23017_GPIOA = 0x12
MCP23017_GPIOB = 0x13
class CharLCD(BaseCharLCD):
def __init__(self, i2c_expander, address, expander_params=None, port=1,
cols=20, rows=4, dotsize=8,
charmap='A02',
auto_linebreaks=True,
backlight_enabled=True):
"""
CharLCD via PCF8574 I2C port expander:
Pin mapping::
7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
D7 | D6 | D5 | D4 | BL | EN | RW | RS
CharLCD via MCP23008 and MCP23017 I2C port expanders:
Adafruit I2C/SPI LCD Backback is supported.
        Warning: You might need an I²C-capable level shifter between
        the SCL/SDA connections on the MCP chip / backpack and the
        Raspberry Pi. Without one you risk damaging the Pi and any
        other 3.3V I²C devices on the bus, or at least causing
        reliability issues: SCL/SDA are rated at 0.7*VDD on the
        MCP23008, so they need 3.5V when the chip is driven at 5V.
The MCP23008 and MCP23017 needs to be connected exactly the same way as the backpack.
For complete schematics see the adafruit page at:
https://learn.adafruit.com/i2c-spi-lcd-backpack/
4-bit operation. I2C only supported.
Pin mapping::
7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
BL | D7 | D6 | D5 | D4 | E | RS | -
:param address: The I2C address of your LCD.
:type address: int
:param i2c_expander: Set your I²C chip type. Supported: "PCF8574", "MCP23008", "MCP23017".
:type i2c_expander: string
        :param expander_params: Parameters for the expander, as a dictionary.
            Only needed for the MCP23017:
            gpio_bank - must be either ``A`` or ``B``.
            If you have a HAT, bank A is usually marked 1 and bank B is 2.
            Example: ``expander_params={'gpio_bank': 'A'}``
:type expander_params: dictionary
:param port: The I2C port number. Default: ``1``.
:type port: int
:param cols: Number of columns per row (usually 16 or 20). Default: ``20``.
:type cols: int
:param rows: Number of display rows (usually 1, 2 or 4). Default: ``4``.
:type rows: int
:param dotsize: Some 1 line displays allow a font height of 10px.
Allowed: 8 or 10. Default: ``8``.
:type dotsize: int
:param charmap: The character map used. Depends on your LCD. This must
be either ``A00`` or ``A02``. Default: ``A02``.
:type charmap: str
:param auto_linebreaks: Whether or not to automatically insert line breaks.
Default: ``True``.
:type auto_linebreaks: bool
:param backlight_enabled: Whether the backlight is enabled initially. Default: ``True``.
:type backlight_enabled: bool
"""
# Set own address and port.
self._address = address
self._port = port
        # Set the I2C expander; 'PCF8574', 'MCP23008' and 'MCP23017' are supported.
if i2c_expander in ['PCF8574', 'MCP23008', 'MCP23017']:
self._i2c_expander = i2c_expander
else:
raise NotImplementedError('I2C expander "%s" is not supported.' % i2c_expander)
        # Error checking for expander parameters
        if expander_params is None:
            if self._i2c_expander == 'MCP23017':
                raise ValueError('MCP23017: expander_params[\'gpio_bank\'] is not defined, must be either \'A\' or \'B\'')
            else:
                self._expander_params = {}
        else:
            if self._i2c_expander == 'MCP23017':
                if expander_params['gpio_bank'] in ['A', 'B']:
                    self._expander_params = {'gpio_bank': expander_params['gpio_bank']}
                else:
                    raise ValueError('MCP23017: expander_params[\'gpio_bank\'] is \'%s\' must be either \'A\' or \'B\'' % expander_params['gpio_bank'])
            else:
                # Expanders other than the MCP23017 take no parameters, but
                # don't leave the attribute unset if a dict was passed anyway.
                self._expander_params = {}
# Currently the I2C mode only supports 4 bit communication
self.data_bus_mode = c.LCD_4BITMODE
# Set backlight status
if self._i2c_expander == 'PCF8574':
self._backlight = PCF8574_BACKLIGHT if backlight_enabled else PCF8574_NOBACKLIGHT
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._backlight = MCP230XX_BACKLIGHT if backlight_enabled else MCP230XX_NOBACKLIGHT
# Call superclass
super(CharLCD, self).__init__(cols, rows, dotsize,
charmap=charmap,
auto_linebreaks=auto_linebreaks)
# Refresh backlight status
self.backlight_enabled = backlight_enabled
def _init_connection(self):
self.bus = SMBus(self._port)
if self._i2c_expander == 'PCF8574':
c.msleep(50)
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
# Variable for storing data and applying bitmasks and shifting.
self._mcp_data = 0
# Set iodir register value according to expander
# If using MCP23017 set which gpio bank to use, A or B
if self._i2c_expander == 'MCP23008':
IODIR = MCP23008_IODIR
self._mcp_gpio = MCP23008_GPIO
elif self._i2c_expander == 'MCP23017':
# Set gpio bank A or B
if self._expander_params['gpio_bank'] == 'A':
IODIR = MCP23017_IODIRA
self._mcp_gpio = MCP23017_GPIOA
elif self._expander_params['gpio_bank'] == 'B':
IODIR = MCP23017_IODIRB
self._mcp_gpio = MCP23017_GPIOB
# Set IO DIRection to output on all GPIOs (GP0-GP7)
self.bus.write_byte_data(self._address, IODIR, 0x00)
def _close_connection(self):
# Nothing to do here?
pass
# Properties
def _get_backlight_enabled(self):
if self._i2c_expander == 'PCF8574':
return self._backlight == PCF8574_BACKLIGHT
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
return self._backlight == MCP230XX_BACKLIGHT
def _set_backlight_enabled(self, value):
if self._i2c_expander == 'PCF8574':
self._backlight = PCF8574_BACKLIGHT if value else PCF8574_NOBACKLIGHT
self.bus.write_byte(self._address, self._backlight)
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
if value is True:
self._mcp_data |= MCP230XX_BACKLIGHT
else:
self._mcp_data &= MCP230XX_NOBACKLIGHT
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
backlight_enabled = property(_get_backlight_enabled, _set_backlight_enabled,
doc='Whether or not to enable the backlight. Either ``True`` or ``False``.')
# Low level commands
def _send_data(self, value):
if self._i2c_expander == 'PCF8574':
self.bus.write_byte(self._address, (c.RS_DATA | (value & 0xF0)) | self._backlight)
self._pulse_data(c.RS_DATA | (value & 0xF0))
self.bus.write_byte(self._address, (c.RS_DATA |
((value << 4) & 0xF0)) | self._backlight)
self._pulse_data(c.RS_DATA | ((value << 4) & 0xF0))
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._mcp_data |= MCP230XX_RS
self._pulse_data(value >> 4)
self._pulse_data(value & 0x0F)
def _send_instruction(self, value):
if self._i2c_expander == 'PCF8574':
self.bus.write_byte(self._address, (c.RS_INSTRUCTION |
(value & 0xF0)) | self._backlight)
self._pulse_data(c.RS_INSTRUCTION | (value & 0xF0))
self.bus.write_byte(self._address, (c.RS_INSTRUCTION |
((value << 4) & 0xF0)) | self._backlight)
self._pulse_data(c.RS_INSTRUCTION | ((value << 4) & 0xF0))
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._mcp_data &= ~MCP230XX_RS
self._pulse_data(value >> 4)
self._pulse_data(value & 0x0F)
def _pulse_data(self, value):
"""Pulse the `enable` flag to process value."""
if self._i2c_expander == 'PCF8574':
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(1)
self.bus.write_byte(self._address, value | PCF8574_E | self._backlight)
c.usleep(1)
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(100)
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._mcp_data &= ~MCP230XX_DATAMASK
self._mcp_data |= value << MCP230XX_DATASHIFT
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data |= MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(100)
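# --- Usage sketch (not part of the original module). The 0x27 address is a
# common PCF8574 backpack default and is an assumption here; check your bus
# with `i2cdetect -y 1`. `write_string` is provided by BaseCharLCD. ---
if __name__ == '__main__':
    lcd = CharLCD('PCF8574', 0x27, port=1, cols=16, rows=2)
    lcd.write_string('Hello world')
    # An MCP23017 additionally needs its GPIO bank:
    # lcd = CharLCD('MCP23017', 0x20, expander_params={'gpio_bank': 'A'})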
|
mit
| 4,186,489,563,130,845,700
| 41.460377
| 151
| 0.594383
| false
| 3.438875
| false
| false
| false
|
pienkowb/omelette
|
omelette/compiler/parser.py
|
1
|
4614
|
from pyparsing import *
from omelette.compiler.lexer import Lexer
from omelette.compiler.uml import *
from omelette.compiler import logging
def callback(handler):
def wrapper(self, s, l, t):
handler(self, t)
return wrapper
class Parser(object):
"""A class translating code to UMLObjects."""
def __init__(self, lexer=Lexer()):
"""Constructor that pins to a provided Lexer."""
self.__lexer = lexer
self.__register_handlers()
def parse(self, code_objects):
self.__uml_object = None
self.__last_type = None
self.__objects = {}
for code_object in code_objects:
if code_object.position < 0:
if not code_object.is_empty():
message = "object definition without header"
logging.getLogger("compiler").warning(message)
continue
self.__code_object = code_object
self.__lexer["definition"].parseString(str(code_object))
return self.__objects
def __register_handlers(self):
"""Sets parseActions for appropriate tokens in lexer."""
self.__lexer["definition"].setParseAction(self.__handle_definition)
self.__lexer["header"].setParseAction(self.__handle_header)
self.__lexer["operation"].setParseAction(self.__handle_operation)
self.__lexer["attribute"].setParseAction(self.__handle_attribute)
self.__lexer["property"].setParseAction(self.__handle_property)
self.__lexer["constraint"].setParseAction(self.__handle_constraint)
self.__lexer["multiplicity"].setParseAction(self.__handle_multiplicity)
self.__lexer["name"].setParseAction(self.__handle_name)
self.__lexer["error"].setParseAction(self.__handle_error)
@callback
def __handle_definition(self, token):
name = self.__uml_object.name
self.__objects[name] = self.__uml_object
self.__uml_object = None
@callback
def __handle_error(self, token):
line = token["error"].get("line")
message = "unrecognised syntax: " + line
logging.getLogger("compiler").warning(message, object=self.__uml_object)
@callback
def __handle_header(self, token):
name = token["header"].get("name")
parent = token["header"]["parent"]
prototype = "prototype" in token["header"]
        if name is None:
name = "%s" % self.__code_object.position
if parent == "base":
parent = None
self.__uml_object = UMLObject(parent, name, prototype,
self.__code_object)
@callback
def __handle_attribute(self, token):
static = "static" in token["attribute"]
visibility = token["attribute"].get("visibility")
name = token["attribute"]["name"]
type = token["attribute"].get("type")
default = token["attribute"].get("default")
attribute = Attribute(visibility, name, static, type, default)
self.__uml_object.add_attribute(attribute)
@callback
def __handle_operation(self, token):
static = "static" in token["operation"]
visibility = token["operation"].get("visibility")
name = token["operation"]["name"]
parameters = []
if "parameters" in token["operation"]:
for parameter in token["operation"]["parameters"]:
parameter_name = parameter["name"]
type = parameter.get("type")
parameters.append((parameter_name, type))
return_type = token["operation"].get("return_type")
operation = Operation(visibility, name, static, parameters, return_type)
self.__uml_object.add_operation(operation)
@callback
def __handle_property(self, token):
name = token["property"]["name"]
value = "".join(token["property"]["value"])
type = self.__last_type if self.__last_type else "STRING"
self.__last_type = None
self.__uml_object.properties[name] = (value, type)
@callback
def __handle_constraint(self, token):
value = token.get("value")
constants = token.get("constants")
        if constants is not None:
            value = list(constants)
if token["type"] == "allow":
self.__uml_object.allowed[token["key"]] = value
elif token["type"] == "require":
self.__uml_object.required[token["key"]] = value
@callback
def __handle_multiplicity(self, token):
self.__last_type = "MULTIPLICITY"
@callback
def __handle_name(self, token):
self.__last_type = "OBJECT"
|
gpl-3.0
| 2,444,114,487,353,214,500
| 32.194245
| 80
| 0.597529
| false
| 4.394286
| false
| false
| false
|
qilicun/python
|
python3/src/oo/vehicle0.py
|
1
|
3006
|
from pysketcher import *
R = 1 # radius of wheel
L = 4 # distance between wheels
H = 2 # height of vehicle body
w_1 = 5 # position of front wheel
xmax = w_1 + 2*L + 3*R
drawing_tool.set_coordinate_system(xmin=0, xmax=xmax,
ymin=-1, ymax=2*R + 3*H,
axis=False)
wheel1 = Circle(center=(w_1, R), radius=R)
wheel2 = wheel1.copy()
wheel2.translate((L,0))
under = Rectangle(lower_left_corner=(w_1-2*R, 2*R),
width=2*R + L + 2*R, height=H)
over = Rectangle(lower_left_corner=(w_1, 2*R + H),
width=2.5*R, height=1.25*H)
wheels = Composition({'wheel1': wheel1, 'wheel2': wheel2})
body = Composition({'under': under, 'over': over})
vehicle = Composition({'wheels': wheels, 'body': body})
ground = Wall(x=[R, xmax], y=[0, 0], thickness=-0.3*R)
fig = Composition({'vehicle': vehicle, 'ground': ground})
fig.draw() # send all figures to plotting backend
drawing_tool.display()
drawing_tool.savefig('tmp1.png')
fig['vehicle']['wheels'].set_filled_curves('blue')
fig['vehicle']['wheels'].set_linewidth(6)
fig['vehicle']['wheels'].set_linecolor('black')
fig['vehicle']['body']['under'].set_filled_curves('red')
fig['vehicle']['body']['over'].set_filled_curves(pattern='/')
fig['vehicle']['body']['over'].set_linewidth(14)
drawing_tool.erase() # avoid drawing old and new fig on top of each other
fig.draw()
drawing_tool.display()
drawing_tool.savefig('tmp2.png')
print fig
fig.recurse('fig')
fig.graphviz_dot('fig', False)
import os
import time
time.sleep(1)
# Animate motion
fig['vehicle'].translate((L,0)) # move to start point for "driving"
def v(t):
return -8*R*t*(1 - t/(2*R))
import numpy
tp = numpy.linspace(0, 2*R, 25)
dt = tp[1] - tp[0] # time step
def move(t, fig):
x_displacement = dt*v(t)
fig['vehicle'].translate((x_displacement, 0))
files = animate(fig, tp, move, moviefiles=True,
pause_per_frame=0)
os.system('convert -delay 20 %s anim.gif' % files)
os.system('ffmpeg -i "tmp_frame_%04d.png" -b 800k -r 25 -vcodec mpeg4 -y -qmin 2 -qmax 31 anim.mpeg')
try:
from scitools.std import movie
except ImportError:
raise ImportError(
'scitools must be installed for running the "movie" function.\n'
'scitools is installed by sudo apt-get install python-scitools\n'
'on Ubuntu or by sudo python setup.py install if the code is\n'
'downloaded from http://code.google.com/p/scitools.')
# HTML page showing individual frames
movie(files, encoder='html', fps=4, output_file='anim.html')
# Standard GIF file
movie(files, encoder='convert', fps=4, output_file='anim2.gif')
# AVI format
movie('tmp_*.png', encoder='ffmpeg', fps=4,
output_file='anim.avi') # requires ffmpeg package
# MPEG format
movie('tmp_*.png', encoder='ffmpeg', fps=4,
      output_file='anim3.mpeg', vcodec='mpeg2video')
# or
movie(files, encoder='ppmtompeg', fps=24,
output_file='anim2.mpeg') # requires the netpbm package
raw_input()
|
gpl-3.0
| -143,500,134,917,915,000
| 29.363636
| 101
| 0.644378
| false
| 2.944172
| false
| false
| false
|
DemocracyLab/CivicTechExchange
|
civictechprojects/migrations/0009_auto_20180403_1604.py
|
1
|
1786
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-04-03 16:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('civictechprojects', '0008_auto_20180317_2315'),
]
operations = [
migrations.CreateModel(
name='ProjectPosition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position_description', models.CharField(max_length=3000)),
('position_project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='positions', to='civictechprojects.Project')),
],
),
migrations.CreateModel(
name='TaggedPositionRole',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='civictechprojects.ProjectPosition')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='civictechprojects_taggedpositionrole_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='projectposition',
name='position_role',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='civictechprojects.TaggedPositionRole', to='taggit.Tag', verbose_name='Tags'),
),
]
|
mit
| 4,902,892,915,200,710,000
| 41.52381
| 181
| 0.620941
| false
| 4.153488
| false
| false
| false
|
nanophotonics/nplab
|
nplab/instrument/monochromator/bentham_DTMc300.py
|
1
|
2962
|
from __future__ import print_function
import ctypes
from nplab.instrument import Instrument
from ctypes import CDLL, c_char_p,byref,c_char, POINTER, ARRAY, WinDLL
import os
import numpy as np
import time
FILEPATH = os.path.realpath(__file__)
DIRPATH = os.path.dirname(FILEPATH)
ATTRS_PATH = "{0}\\{1}".format(DIRPATH,"bentham_DTMc300_attributes.atr")
CONFIG_PATH = "{0}\\{1}".format(DIRPATH,"bentham_DTMc300_config.cfg")
DLL_PATH="{0}\\{1}".format(DIRPATH,"bentham_instruments_dlls\\Win32\\benhw32_fastcall.dll") #NOTE: hardcoded to use 64 bit DLL, for 32bit use the ones in Win32
# print DLL_PATH
def read_tokens():
'''
    Text tokens are mapped to integers in the bentham_dlltokens.h file.
    Read that file and build the dictionary of tokens.
'''
token_map = {}
import re
definition_pattern = re.compile("#define.*")
token_filepath = os.path.normpath(DIRPATH+"/bentham_dlltokens.h")
with open(token_filepath,"r") as f:
for line in f.readlines():
line = line.strip("\n")
if bool(definition_pattern.match(line))==True:
line_list = line.split(" ")
token_map.update({line_list[1]:int(line_list[2])})
return token_map
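# Illustrative example (hypothetical numeric value): a header line such as
#   #define MonochromatorCurrentWL 12
# yields the entry {'MonochromatorCurrentWL': 12} in the returned map.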
class Bentham_DTMc300(Instrument):
def __init__(self):
super(Bentham_DTMc300,self).__init__()
self.dll = WinDLL(DLL_PATH)
self.token_map = read_tokens()
error_report = c_char_p("")
response = self.dll.BI_build_system_model(c_char_p(CONFIG_PATH),error_report)
print("Error report",error_report)
print("BI_build_system_model:",response)
response = self.dll.BI_load_setup(c_char_p(ATTRS_PATH))
print("BI_load_setup:",response)
response = self.dll.BI_initialise(None)
print("BI_initialise:",response)
response = self.dll.BI_park(None)
print("BI_park:",response)
self.components = self.get_component_list()
def get_component_list(self):
mylist = (ctypes.c_char*100)()
response = self.dll.BI_get_component_list(ctypes.byref(mylist))
components = [k for k in ("".join([c for c in mylist if c != '\x00'])).split(",") if k != '']
print("BI_get_component_list:",response, components)
return components
def get(self,item_id,token,index):
value = ctypes.c_double(0.0)
print("id:{0}, token:{1}, index:{2}".format(item_id,token,index))
response = self.dll.BI_get(c_char_p(item_id),ctypes.c_int32(self.token_map[token]),ctypes.c_int32(index),ctypes.byref(value))
print("BI_get", response)
return value.value
def get_wavelength(self,token="mono"):
wavelength = self.get(item_id="mono",token="MonochromatorCurrentWL",index=0)
return wavelength
def set_wavelength(self,wavelength):
delay = ctypes.c_double(0.0)
response = self.dll.BI_select_wavelength(ctypes.c_double(wavelength), ctypes.byref(delay))
time.sleep(0.3) #sleep for 300ms - ensure everything has moved
return
if __name__ == "__main__":
m = Bentham_DTMc300()
initial = m.get_wavelength()
m.set_wavelength(0)
final = m.get_wavelength()
print("Initial, Final:", initial, final)
print("DONE")
|
gpl-3.0
| -9,105,023,763,205,697,000
| 31.56044
| 159
| 0.698177
| false
| 2.823642
| false
| false
| false
|
trdarr/apk2
|
import/import.py
|
1
|
1792
|
from io import BytesIO
from xml.etree import ElementTree
import psycopg2
import requests
from entity import Product, Store
def create_sql(table, entity):
entity_dict = entity.to_dict()
columns = ', '.join(entity_dict.keys())
placeholders = ', '.join(f'%({k})s' for k in entity_dict.keys())
insert_fragment = f'insert into {table} ({columns}) values ({placeholders})'
expressions = ', '.join(f'{k} = %({k})s' for k in entity_dict.keys())
update_fragment = f'on conflict (nr) do update set {expressions}'
return f'{insert_fragment} {update_fragment}'
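# Example (illustrative): for an entity whose to_dict() returns
# {'nr': '1', 'name': 'Foo'}, create_sql('products', entity) produces:
#   insert into products (nr, name) values (%(nr)s, %(name)s)
#   on conflict (nr) do update set nr = %(nr)s, name = %(name)s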
def from_file(filename, tag_name):
with open(f'data/{filename}.xml') as xml_file:
for event, element in ElementTree.iterparse(xml_file):
if event == 'end' and element.tag == tag_name:
yield {child.tag: child.text for child in element.iter()}
element.clear()
def from_systembolaget(filename, tag_name):
base_url = 'https://www.systembolaget.se/api/assortment'
url = '/'.join((base_url, filename, 'xml'))
response = requests.get(url)
if response.status_code != requests.codes.ok:
raise Exception(f'Got {response.status_code} from <{url}>.')
for event, element in ElementTree.iterparse(BytesIO(response.content)):
if event == 'end' and element.tag == tag_name:
yield element
element.clear()
products = from_file('products', 'artikel')
stores = from_file('stores', 'ButikOmbud')
with psycopg2.connect(user='postgres', host='postgres') as connection:
with connection.cursor() as cursor:
for product in (Product(**product) for product in products):
cursor.execute(create_sql('products', product), product.to_dict())
for store in (Store(**store) for store in stores):
cursor.execute(create_sql('stores', store), store.to_dict())
|
apache-2.0
| 6,123,053,057,215,423,000
| 34.137255
| 78
| 0.68471
| false
| 3.486381
| false
| false
| false
|
dineshsonachalam/ocr
|
setup.py
|
1
|
1380
|
from setuptools import setup, find_packages
import requests
import semantic_version
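# NOTE: requests and semantic_version are imported at build time, so they must
# be installed before running this setup.py (they are not in install_requires).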
install_requires = [
'boto3>=1.17.78',
'botocore>=1.20.78',
'simplejson==3.17.2'
]
def get_LucidDynamodb_version():
url = "https://pypi.org/pypi/LucidDynamodb/json"
response = requests.request("GET", url, headers={}, data={})
result = response.json()
LucidDynamodb_version = str(result.get("info").get("version"))
current_version = semantic_version.Version(LucidDynamodb_version)
next_version = current_version.next_patch()
return next_version
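# Example (illustrative): if PyPI currently reports 0.9.3, next_patch()
# returns 0.9.4, which becomes the version of this build.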
setup(
name="LucidDynamodb",
version=str(get_LucidDynamodb_version()),
author="Dinesh Sonachalam",
author_email="dineshsonachalam@gmail.com",
description="A simple Python wrapper to AWS Dynamodb",
url="https://github.com/dineshsonachalam/Lucid-Dynamodb",
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
zip_safe=False,
license='MIT',
keywords='python dynamodb amazon',
python_requires=">=3.1",
install_requires=install_requires,
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
|
mit
| 6,862,585,355,070,034,000
| 33.525
| 69
| 0.635507
| false
| 3.631579
| false
| false
| false
|
gdm/skew
|
skew/resources/aws/s3.py
|
1
|
1420
|
# Copyright (c) 2014 Scopely, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import jmespath
from skew.resources.aws import AWSResource
class Bucket(AWSResource):
class Meta(object):
service = 's3'
type = 'bucket'
enum_spec = ('list_buckets', 'Buckets[]', None)
detail_spec = ('list_objects', 'Bucket', 'Contents[]')
id = 'Name'
filter_name = None
name = 'BucketName'
date = 'CreationDate'
dimension = None
def __init__(self, client, data, query=None):
super(Bucket, self).__init__(client, data, query)
self._data = data
self._keys = []
def __iter__(self):
detail_op, param_name, detail_path = self.Meta.detail_spec
params = {param_name: self.id}
if not self._keys:
data = self._client.call(detail_op, **params)
self._keys = jmespath.search(detail_path, data)
for key in self._keys:
yield key
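# Usage sketch (illustrative; the ARN pattern is an assumption about skew's
# scheme, and key['Key'] assumes the S3 list_objects Contents shape):
#     import skew
#     for bucket in skew.scan('arn:aws:s3:::bucket/*'):
#         for key in bucket:
#             print(key['Key'])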
|
apache-2.0
| 7,790,654,558,427,385,000
| 32.023256
| 72
| 0.628873
| false
| 3.806971
| false
| false
| false
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/mail/imap4.py
|
1
|
185338
|
# -*- test-case-name: twisted.mail.test.test_imap -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An IMAP4 protocol implementation
API Stability: Semi-stable
@author: U{Jp Calderone<mailto:exarkun@twistedmatrix.com>}
To do::
Suspend idle timeout while server is processing
Use an async message parser instead of buffering in memory
Figure out a way to not queue multi-message client requests (Flow? A simple callback?)
Clarify some API docs (Query, etc)
Make APPEND recognize (again) non-existent mailboxes before accepting the literal
"""
from __future__ import nested_scopes
from __future__ import generators
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import defer
from twisted.internet import error
from twisted.internet.defer import maybeDeferred
from twisted.python import log, util, failure, text
from twisted.internet import interfaces
from twisted import cred
import twisted.cred.error
import twisted.cred.credentials
import rfc822
import base64
import binascii
import time
import hmac
import re
import tempfile
import string
import time
import random
import types
import sys
from zope.interface import implements, Interface
import email.Utils
try:
import cStringIO as StringIO
except:
import StringIO
class MessageSet(object):
"""
Essentially an infinite bitfield, with some extra features.
@type getnext: Function taking C{int} returning C{int}
@ivar getnext: A function that returns the next message number,
used when iterating through the MessageSet. By default, a function
returning the next integer is supplied, but as this can be rather
inefficient for sparse UID iterations, it is recommended to supply
one when messages are requested by UID. The argument is provided
as a hint to the implementation and may be ignored if it makes sense
    to do so (e.g., if an iterator is being used that maintains its own
state, it is guaranteed that it will not be called out-of-order).
"""
_empty = []
def __init__(self, start=_empty, end=_empty):
"""
Create a new MessageSet()
@type start: Optional C{int}
@param start: Start of range, or only message number
@type end: Optional C{int}
@param end: End of range.
"""
self._last = self._empty # Last message/UID in use
self.ranges = [] # List of ranges included
self.getnext = lambda x: x+1 # A function which will return the next
# message id. Handy for UID requests.
if start is self._empty:
return
if isinstance(start, types.ListType):
self.ranges = start[:]
self.clean()
else:
self.add(start,end)
# Ooo. A property.
def last():
def _setLast(self,value):
if self._last is not self._empty:
raise ValueError("last already set")
self._last = value
for i,(l,h) in enumerate(self.ranges):
if l is not None:
break # There are no more Nones after this
l = value
if h is None:
h = value
if l > h:
l, h = h, l
self.ranges[i] = (l,h)
self.clean()
def _getLast(self):
return self._last
doc = '''
"Highest" message number, refered to by "*".
Must be set before attempting to use the MessageSet.
'''
return _getLast, _setLast, None, doc
last = property(*last())
def add(self, start, end=_empty):
"""
Add another range
@type start: C{int}
@param start: Start of range, or only message number
@type end: Optional C{int}
@param end: End of range.
"""
if end is self._empty:
end = start
if self._last is not self._empty:
if start is None:
start = self.last
if end is None:
end = self.last
if start > end:
# Try to keep in low, high order if possible
# (But we don't know what None means, this will keep
# None at the start of the ranges list)
start, end = end, start
self.ranges.append((start,end))
self.clean()
def __add__(self, other):
if isinstance(other, MessageSet):
ranges = self.ranges + other.ranges
return MessageSet(ranges)
else:
res = MessageSet(self.ranges)
try:
res.add(*other)
except TypeError:
res.add(other)
return res
def extend(self, other):
if isinstance(other, MessageSet):
self.ranges.extend(other.ranges)
self.clean()
else:
try:
self.add(*other)
except TypeError:
self.add(other)
return self
def clean(self):
"""
Clean ranges list, combining adjacent ranges
"""
self.ranges.sort()
oldl, oldh = None, None
for i,(l,h) in enumerate(self.ranges):
if l is None:
continue
# l is >= oldl and h is >= oldh due to sort()
if oldl is not None and l <= oldh+1:
l = oldl
h = max(oldh,h)
self.ranges[i-1] = None
self.ranges[i] = (l,h)
oldl,oldh = l,h
self.ranges = filter(None, self.ranges)
def __contains__(self, value):
"""
May raise TypeError if we encounter unknown "high" values
"""
for l,h in self.ranges:
if l is None:
raise TypeError(
"Can't determine membership; last value not set")
if l <= value <= h:
return True
return False
def _iterator(self):
for l,h in self.ranges:
l = self.getnext(l-1)
while l <= h:
yield l
l = self.getnext(l)
if l is None:
break
def __iter__(self):
if self.ranges and self.ranges[0][0] is None:
raise TypeError("Can't iterate; last value not set")
return self._iterator()
def __len__(self):
res = 0
for l, h in self.ranges:
if l is None:
raise TypeError("Can't size object; last value not set")
res += (h - l) + 1
return res
def __str__(self):
p = []
for low, high in self.ranges:
if low == high:
if low is None:
p.append('*')
else:
p.append(str(low))
elif low is None:
p.append('%d:*' % (high,))
else:
p.append('%d:%d' % (low, high))
return ','.join(p)
def __repr__(self):
return '<MessageSet %s>' % (str(self),)
def __eq__(self, other):
if isinstance(other, MessageSet):
return self.ranges == other.ranges
return False
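# Example: overlapping or adjacent ranges are coalesced by clean(), e.g.
#     ms = MessageSet(1, 5)
#     ms.add(4, 9)
#     str(ms)   # -> '1:9'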
class LiteralString:
def __init__(self, size, defered):
self.size = size
self.data = []
self.defer = defered
def write(self, data):
self.size -= len(data)
passon = None
if self.size > 0:
self.data.append(data)
else:
if self.size:
data, passon = data[:self.size], data[self.size:]
else:
passon = ''
if data:
self.data.append(data)
return passon
def callback(self, line):
"""
        Call the deferred with the data and the rest of the line
"""
self.defer.callback((''.join(self.data), line))
class LiteralFile:
_memoryFileLimit = 1024 * 1024 * 10
def __init__(self, size, defered):
self.size = size
self.defer = defered
if size > self._memoryFileLimit:
self.data = tempfile.TemporaryFile()
else:
self.data = StringIO.StringIO()
def write(self, data):
self.size -= len(data)
passon = None
if self.size > 0:
self.data.write(data)
else:
if self.size:
data, passon = data[:self.size], data[self.size:]
else:
passon = ''
if data:
self.data.write(data)
return passon
def callback(self, line):
"""
        Call the deferred with the data and the rest of the line
"""
self.data.seek(0,0)
self.defer.callback((self.data, line))
class WriteBuffer:
"""Buffer up a bunch of writes before sending them all to a transport at once.
"""
def __init__(self, transport, size=8192):
self.bufferSize = size
self.transport = transport
self._length = 0
self._writes = []
def write(self, s):
self._length += len(s)
self._writes.append(s)
if self._length > self.bufferSize:
self.flush()
def flush(self):
if self._writes:
self.transport.writeSequence(self._writes)
self._writes = []
self._length = 0
class Command:
_1_RESPONSES = ('CAPABILITY', 'FLAGS', 'LIST', 'LSUB', 'STATUS', 'SEARCH', 'NAMESPACE')
_2_RESPONSES = ('EXISTS', 'EXPUNGE', 'FETCH', 'RECENT')
_OK_RESPONSES = ('UIDVALIDITY', 'READ-WRITE', 'READ-ONLY', 'UIDNEXT', 'PERMANENTFLAGS')
defer = None
def __init__(self, command, args=None, wantResponse=(),
continuation=None, *contArgs, **contKw):
self.command = command
self.args = args
self.wantResponse = wantResponse
self.continuation = lambda x: continuation(x, *contArgs, **contKw)
self.lines = []
def format(self, tag):
if self.args is None:
return ' '.join((tag, self.command))
return ' '.join((tag, self.command, self.args))
def finish(self, lastLine, unusedCallback):
send = []
unuse = []
for L in self.lines:
names = parseNestedParens(L)
N = len(names)
if (N >= 1 and names[0] in self._1_RESPONSES or
N >= 2 and names[0] == 'OK' and isinstance(names[1], types.ListType) and names[1][0] in self._OK_RESPONSES):
send.append(L)
elif N >= 3 and names[1] in self._2_RESPONSES:
if isinstance(names[2], list) and len(names[2]) >= 1 and names[2][0] == 'FLAGS' and 'FLAGS' not in self.args:
unuse.append(L)
else:
send.append(L)
elif N >= 2 and names[1] in self._2_RESPONSES:
send.append(L)
else:
unuse.append(L)
d, self.defer = self.defer, None
d.callback((send, lastLine))
if unuse:
unusedCallback(unuse)
class LOGINCredentials(cred.credentials.UsernamePassword):
def __init__(self):
self.challenges = ['Password\0', 'User Name\0']
self.responses = ['password', 'username']
cred.credentials.UsernamePassword.__init__(self, None, None)
def getChallenge(self):
return self.challenges.pop()
def setResponse(self, response):
setattr(self, self.responses.pop(), response)
def moreChallenges(self):
return bool(self.challenges)
class PLAINCredentials(cred.credentials.UsernamePassword):
def __init__(self):
cred.credentials.UsernamePassword.__init__(self, None, None)
def getChallenge(self):
return ''
def setResponse(self, response):
parts = response[:-1].split('\0', 1)
if len(parts) != 2:
raise IllegalClientResponse("Malformed Response - wrong number of parts")
self.username, self.password = parts
def moreChallenges(self):
return False
class IMAP4Exception(Exception):
def __init__(self, *args):
Exception.__init__(self, *args)
class IllegalClientResponse(IMAP4Exception): pass
class IllegalOperation(IMAP4Exception): pass
class IllegalMailboxEncoding(IMAP4Exception): pass
class IMailboxListener(Interface):
"""Interface for objects interested in mailbox events"""
def modeChanged(writeable):
"""Indicates that the write status of a mailbox has changed.
@type writeable: C{bool}
@param writeable: A true value if write is now allowed, false
otherwise.
"""
def flagsChanged(newFlags):
"""Indicates that the flags of one or more messages have changed.
@type newFlags: C{dict}
@param newFlags: A mapping of message identifiers to tuples of flags
now set on that message.
"""
def newMessages(exists, recent):
"""Indicates that the number of messages in a mailbox has changed.
@type exists: C{int} or C{None}
@param exists: The total number of messages now in this mailbox.
If the total number of messages has not changed, this should be
C{None}.
@type recent: C{int}
@param recent: The number of messages now flagged \\Recent.
If the number of recent messages has not changed, this should be
C{None}.
"""
class IMAP4Server(basic.LineReceiver, policies.TimeoutMixin):
"""
Protocol implementation for an IMAP4rev1 server.
The server can be in any of four states:
- Non-authenticated
- Authenticated
- Selected
- Logout
"""
implements(IMailboxListener)
# Identifier for this server software
IDENT = 'Twisted IMAP4rev1 Ready'
# Number of seconds before idle timeout
# Initially 1 minute. Raised to 30 minutes after login.
timeOut = 60
POSTAUTH_TIMEOUT = 60 * 30
# Whether STARTTLS has been issued successfully yet or not.
startedTLS = False
# Whether our transport supports TLS
canStartTLS = False
# Mapping of tags to commands we have received
tags = None
# The object which will handle logins for us
portal = None
# The account object for this connection
account = None
# Logout callback
_onLogout = None
# The currently selected mailbox
mbox = None
# Command data to be processed when literal data is received
_pendingLiteral = None
# Maximum length to accept for a "short" string literal
_literalStringLimit = 4096
# IChallengeResponse factories for AUTHENTICATE command
challengers = None
state = 'unauth'
parseState = 'command'
def __init__(self, chal = None, contextFactory = None, scheduler = None):
if chal is None:
chal = {}
self.challengers = chal
self.ctx = contextFactory
if scheduler is None:
scheduler = iterateInReactor
self._scheduler = scheduler
self._queuedAsync = []
def capabilities(self):
cap = {'AUTH': self.challengers.keys()}
if self.ctx and self.canStartTLS:
if not self.startedTLS and interfaces.ISSLTransport(self.transport, None) is None:
cap['LOGINDISABLED'] = None
cap['STARTTLS'] = None
cap['NAMESPACE'] = None
cap['IDLE'] = None
return cap
def connectionMade(self):
self.tags = {}
self.canStartTLS = interfaces.ITLSTransport(self.transport, None) is not None
self.setTimeout(self.timeOut)
self.sendServerGreeting()
def connectionLost(self, reason):
self.setTimeout(None)
if self._onLogout:
self._onLogout()
self._onLogout = None
def timeoutConnection(self):
self.sendLine('* BYE Autologout; connection idle too long')
self.transport.loseConnection()
if self.mbox:
self.mbox.removeListener(self)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
maybeDeferred(cmbx.close).addErrback(log.err)
self.mbox = None
self.state = 'timeout'
def rawDataReceived(self, data):
self.resetTimeout()
passon = self._pendingLiteral.write(data)
if passon is not None:
self.setLineMode(passon)
# Avoid processing commands while buffers are being dumped to
# our transport
blocked = None
def _unblock(self):
commands = self.blocked
self.blocked = None
while commands and self.blocked is None:
self.lineReceived(commands.pop(0))
if self.blocked is not None:
self.blocked.extend(commands)
# def sendLine(self, line):
# print 'C:', repr(line)
# return basic.LineReceiver.sendLine(self, line)
def lineReceived(self, line):
# print 'S:', repr(line)
if self.blocked is not None:
self.blocked.append(line)
return
self.resetTimeout()
f = getattr(self, 'parse_' + self.parseState)
try:
f(line)
except Exception, e:
self.sendUntaggedResponse('BAD Server error: ' + str(e))
log.err()
def parse_command(self, line):
args = line.split(None, 2)
rest = None
if len(args) == 3:
tag, cmd, rest = args
elif len(args) == 2:
tag, cmd = args
elif len(args) == 1:
tag = args[0]
self.sendBadResponse(tag, 'Missing command')
return None
else:
self.sendBadResponse(None, 'Null command')
return None
cmd = cmd.upper()
try:
return self.dispatchCommand(tag, cmd, rest)
except IllegalClientResponse, e:
self.sendBadResponse(tag, 'Illegal syntax: ' + str(e))
except IllegalOperation, e:
self.sendNegativeResponse(tag, 'Illegal operation: ' + str(e))
except IllegalMailboxEncoding, e:
self.sendNegativeResponse(tag, 'Illegal mailbox name: ' + str(e))
def parse_pending(self, line):
d = self._pendingLiteral
self._pendingLiteral = None
self.parseState = 'command'
d.callback(line)
def dispatchCommand(self, tag, cmd, rest, uid=None):
f = self.lookupCommand(cmd)
if f:
fn = f[0]
parseargs = f[1:]
self.__doCommand(tag, fn, [self, tag], parseargs, rest, uid)
else:
self.sendBadResponse(tag, 'Unsupported command')
def lookupCommand(self, cmd):
return getattr(self, '_'.join((self.state, cmd.upper())), None)
def __doCommand(self, tag, handler, args, parseargs, line, uid):
for (i, arg) in enumerate(parseargs):
if callable(arg):
parseargs = parseargs[i+1:]
maybeDeferred(arg, self, line).addCallback(
self.__cbDispatch, tag, handler, args,
parseargs, uid).addErrback(self.__ebDispatch, tag)
return
else:
args.append(arg)
if line:
# Too many arguments
raise IllegalClientResponse("Too many arguments for command: " + repr(line))
if uid is not None:
handler(uid=uid, *args)
else:
handler(*args)
def __cbDispatch(self, (arg, rest), tag, fn, args, parseargs, uid):
args.append(arg)
self.__doCommand(tag, fn, args, parseargs, rest, uid)
def __ebDispatch(self, failure, tag):
if failure.check(IllegalClientResponse):
self.sendBadResponse(tag, 'Illegal syntax: ' + str(failure.value))
elif failure.check(IllegalOperation):
self.sendNegativeResponse(tag, 'Illegal operation: ' +
str(failure.value))
elif failure.check(IllegalMailboxEncoding):
self.sendNegativeResponse(tag, 'Illegal mailbox name: ' +
str(failure.value))
else:
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
log.err(failure)
def _stringLiteral(self, size):
if size > self._literalStringLimit:
raise IllegalClientResponse(
"Literal too long! I accept at most %d octets" %
(self._literalStringLimit,))
d = defer.Deferred()
self.parseState = 'pending'
self._pendingLiteral = LiteralString(size, d)
self.sendContinuationRequest('Ready for %d octets of text' % size)
self.setRawMode()
return d
def _fileLiteral(self, size):
d = defer.Deferred()
self.parseState = 'pending'
self._pendingLiteral = LiteralFile(size, d)
self.sendContinuationRequest('Ready for %d octets of data' % size)
self.setRawMode()
return d
def arg_astring(self, line):
"""
Parse an astring from the line, return (arg, rest), possibly
via a deferred (to handle literals)
"""
line = line.strip()
if not line:
raise IllegalClientResponse("Missing argument")
d = None
arg, rest = None, None
if line[0] == '"':
try:
spam, arg, rest = line.split('"',2)
rest = rest[1:] # Strip space
except ValueError:
raise IllegalClientResponse("Unmatched quotes")
elif line[0] == '{':
# literal
if line[-1] != '}':
raise IllegalClientResponse("Malformed literal")
try:
size = int(line[1:-1])
except ValueError:
raise IllegalClientResponse("Bad literal size: " + line[1:-1])
d = self._stringLiteral(size)
else:
arg = line.split(' ',1)
if len(arg) == 1:
arg.append('')
arg, rest = arg
return d or (arg, rest)
# ATOM: Any CHAR except ( ) { % * " \ ] CTL SP (CHAR is 7bit)
atomre = re.compile(r'(?P<atom>[^\](){%*"\\\x00-\x20\x80-\xff]+)( (?P<rest>.*$)|$)')
def arg_atom(self, line):
"""
Parse an atom from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
m = self.atomre.match(line)
if m:
return m.group('atom'), m.group('rest')
else:
raise IllegalClientResponse("Malformed ATOM")
def arg_plist(self, line):
"""
Parse a (non-nested) parenthesised list from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
if line[0] != "(":
raise IllegalClientResponse("Missing parenthesis")
i = line.find(")")
if i == -1:
raise IllegalClientResponse("Mismatched parenthesis")
return (parseNestedParens(line[1:i],0), line[i+2:])
def arg_literal(self, line):
"""
Parse a literal from the line
"""
if not line:
raise IllegalClientResponse("Missing argument")
if line[0] != '{':
raise IllegalClientResponse("Missing literal")
if line[-1] != '}':
raise IllegalClientResponse("Malformed literal")
try:
size = int(line[1:-1])
except ValueError:
raise IllegalClientResponse("Bad literal size: " + line[1:-1])
return self._fileLiteral(size)
def arg_searchkeys(self, line):
"""
searchkeys
"""
query = parseNestedParens(line)
# XXX Should really use list of search terms and parse into
# a proper tree
return (query, '')
def arg_seqset(self, line):
"""
sequence-set
"""
rest = ''
arg = line.split(' ',1)
if len(arg) == 2:
rest = arg[1]
arg = arg[0]
try:
return (parseIdList(arg), rest)
except IllegalIdentifierError, e:
raise IllegalClientResponse("Bad message number " + str(e))
def arg_fetchatt(self, line):
"""
fetch-att
"""
p = _FetchParser()
p.parseString(line)
return (p.result, '')
def arg_flaglist(self, line):
"""
Flag part of store-att-flag
"""
flags = []
if line[0] == '(':
if line[-1] != ')':
raise IllegalClientResponse("Mismatched parenthesis")
line = line[1:-1]
while line:
m = self.atomre.search(line)
if not m:
raise IllegalClientResponse("Malformed flag")
if line[0] == '\\' and m.start() == 1:
flags.append('\\' + m.group('atom'))
elif m.start() == 0:
flags.append(m.group('atom'))
else:
raise IllegalClientResponse("Malformed flag")
line = m.group('rest')
return (flags, '')
def arg_line(self, line):
"""
Command line of UID command
"""
return (line, '')
def opt_plist(self, line):
"""
Optional parenthesised list
"""
if line.startswith('('):
return self.arg_plist(line)
else:
return (None, line)
def opt_datetime(self, line):
"""
Optional date-time string
"""
if line.startswith('"'):
try:
spam, date, rest = line.split('"',2)
except IndexError:
raise IllegalClientResponse("Malformed date-time")
return (date, rest[1:])
else:
return (None, line)
def opt_charset(self, line):
"""
Optional charset of SEARCH command
"""
if line[:7].upper() == 'CHARSET':
arg = line.split(' ',2)
if len(arg) == 1:
raise IllegalClientResponse("Missing charset identifier")
if len(arg) == 2:
arg.append('')
spam, arg, rest = arg
return (arg, rest)
else:
return (None, line)
def sendServerGreeting(self):
msg = '[CAPABILITY %s] %s' % (' '.join(self.listCapabilities()), self.IDENT)
self.sendPositiveResponse(message=msg)
def sendBadResponse(self, tag = None, message = ''):
self._respond('BAD', tag, message)
def sendPositiveResponse(self, tag = None, message = ''):
self._respond('OK', tag, message)
def sendNegativeResponse(self, tag = None, message = ''):
self._respond('NO', tag, message)
def sendUntaggedResponse(self, message, async=False):
if not async or (self.blocked is None):
self._respond(message, None, None)
else:
self._queuedAsync.append(message)
def sendContinuationRequest(self, msg = 'Ready for additional command text'):
if msg:
self.sendLine('+ ' + msg)
else:
self.sendLine('+')
def _respond(self, state, tag, message):
if state in ('OK', 'NO', 'BAD') and self._queuedAsync:
lines = self._queuedAsync
self._queuedAsync = []
for msg in lines:
self._respond(msg, None, None)
if not tag:
tag = '*'
if message:
self.sendLine(' '.join((tag, state, message)))
else:
self.sendLine(' '.join((tag, state)))
def listCapabilities(self):
caps = ['IMAP4rev1']
for c, v in self.capabilities().iteritems():
if v is None:
caps.append(c)
elif len(v):
caps.extend([('%s=%s' % (c, cap)) for cap in v])
return caps
def do_CAPABILITY(self, tag):
self.sendUntaggedResponse('CAPABILITY ' + ' '.join(self.listCapabilities()))
self.sendPositiveResponse(tag, 'CAPABILITY completed')
unauth_CAPABILITY = (do_CAPABILITY,)
auth_CAPABILITY = unauth_CAPABILITY
select_CAPABILITY = unauth_CAPABILITY
logout_CAPABILITY = unauth_CAPABILITY
def do_LOGOUT(self, tag):
self.sendUntaggedResponse('BYE Nice talking to you')
self.sendPositiveResponse(tag, 'LOGOUT successful')
self.transport.loseConnection()
unauth_LOGOUT = (do_LOGOUT,)
auth_LOGOUT = unauth_LOGOUT
select_LOGOUT = unauth_LOGOUT
logout_LOGOUT = unauth_LOGOUT
def do_NOOP(self, tag):
self.sendPositiveResponse(tag, 'NOOP No operation performed')
unauth_NOOP = (do_NOOP,)
auth_NOOP = unauth_NOOP
select_NOOP = unauth_NOOP
logout_NOOP = unauth_NOOP
def do_AUTHENTICATE(self, tag, args):
args = args.upper().strip()
if args not in self.challengers:
self.sendNegativeResponse(tag, 'AUTHENTICATE method unsupported')
else:
self.authenticate(self.challengers[args](), tag)
unauth_AUTHENTICATE = (do_AUTHENTICATE, arg_atom)
def authenticate(self, chal, tag):
if self.portal is None:
self.sendNegativeResponse(tag, 'Temporary authentication failure')
return
self._setupChallenge(chal, tag)
def _setupChallenge(self, chal, tag):
try:
challenge = chal.getChallenge()
except Exception, e:
self.sendBadResponse(tag, 'Server error: ' + str(e))
else:
coded = base64.encodestring(challenge)[:-1]
self.parseState = 'pending'
self._pendingLiteral = defer.Deferred()
self.sendContinuationRequest(coded)
self._pendingLiteral.addCallback(self.__cbAuthChunk, chal, tag)
self._pendingLiteral.addErrback(self.__ebAuthChunk, tag)
def __cbAuthChunk(self, result, chal, tag):
try:
uncoded = base64.decodestring(result)
except binascii.Error:
raise IllegalClientResponse("Malformed Response - not base64")
chal.setResponse(uncoded)
if chal.moreChallenges():
self._setupChallenge(chal, tag)
else:
self.portal.login(chal, None, IAccount).addCallbacks(
self.__cbAuthResp,
self.__ebAuthResp,
(tag,), None, (tag,), None
)
def __cbAuthResp(self, (iface, avatar, logout), tag):
assert iface is IAccount, "IAccount is the only supported interface"
self.account = avatar
self.state = 'auth'
self._onLogout = logout
self.sendPositiveResponse(tag, 'Authentication successful')
self.setTimeout(self.POSTAUTH_TIMEOUT)
def __ebAuthResp(self, failure, tag):
if failure.check(cred.error.UnauthorizedLogin):
self.sendNegativeResponse(tag, 'Authentication failed: unauthorized')
elif failure.check(cred.error.UnhandledCredentials):
self.sendNegativeResponse(tag, 'Authentication failed: server misconfigured')
else:
self.sendBadResponse(tag, 'Server error: login failed unexpectedly')
log.err(failure)
def __ebAuthChunk(self, failure, tag):
self.sendNegativeResponse(tag, 'Authentication failed: ' + str(failure.value))
def do_STARTTLS(self, tag):
if self.startedTLS:
self.sendNegativeResponse(tag, 'TLS already negotiated')
elif self.ctx and self.canStartTLS:
self.sendPositiveResponse(tag, 'Begin TLS negotiation now')
self.transport.startTLS(self.ctx)
self.startedTLS = True
self.challengers = self.challengers.copy()
if 'LOGIN' not in self.challengers:
self.challengers['LOGIN'] = LOGINCredentials
if 'PLAIN' not in self.challengers:
self.challengers['PLAIN'] = PLAINCredentials
else:
self.sendNegativeResponse(tag, 'TLS not available')
unauth_STARTTLS = (do_STARTTLS,)
def do_LOGIN(self, tag, user, passwd):
if 'LOGINDISABLED' in self.capabilities():
self.sendBadResponse(tag, 'LOGIN is disabled before STARTTLS')
return
maybeDeferred(self.authenticateLogin, user, passwd
).addCallback(self.__cbLogin, tag
).addErrback(self.__ebLogin, tag
)
unauth_LOGIN = (do_LOGIN, arg_astring, arg_astring)
def authenticateLogin(self, user, passwd):
"""Lookup the account associated with the given parameters
Override this method to define the desired authentication behavior.
The default behavior is to defer authentication to C{self.portal}
if it is not None, or to deny the login otherwise.
@type user: C{str}
@param user: The username to lookup
@type passwd: C{str}
@param passwd: The password to login with
"""
if self.portal:
return self.portal.login(
cred.credentials.UsernamePassword(user, passwd),
None, IAccount
)
raise cred.error.UnauthorizedLogin()
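    # Illustrative override (a sketch, not part of the original class):
    #     class MyIMAP4Server(IMAP4Server):
    #         def authenticateLogin(self, user, passwd):
    #             if user == 'demo' and passwd == 'demo':   # toy check only
    #                 return (IAccount, DemoAccount(), lambda: None)  # DemoAccount is hypothetical
    #             raise cred.error.UnauthorizedLogin()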
def __cbLogin(self, (iface, avatar, logout), tag):
if iface is not IAccount:
self.sendBadResponse(tag, 'Server error: login returned unexpected value')
log.err("__cbLogin called with %r, IAccount expected" % (iface,))
else:
self.account = avatar
self._onLogout = logout
self.sendPositiveResponse(tag, 'LOGIN succeeded')
self.state = 'auth'
self.setTimeout(self.POSTAUTH_TIMEOUT)
def __ebLogin(self, failure, tag):
if failure.check(cred.error.UnauthorizedLogin):
self.sendNegativeResponse(tag, 'LOGIN failed')
else:
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
log.err(failure)
def do_NAMESPACE(self, tag):
personal = public = shared = None
np = INamespacePresenter(self.account, None)
if np is not None:
personal = np.getPersonalNamespaces()
            public = np.getUserNamespaces()
shared = np.getSharedNamespaces()
self.sendUntaggedResponse('NAMESPACE ' + collapseNestedLists([personal, public, shared]))
self.sendPositiveResponse(tag, "NAMESPACE command completed")
auth_NAMESPACE = (do_NAMESPACE,)
select_NAMESPACE = auth_NAMESPACE
def _parseMbox(self, name):
if isinstance(name, unicode):
return name
try:
return name.decode('imap4-utf-7')
except:
log.err()
raise IllegalMailboxEncoding(name)
def _selectWork(self, tag, name, rw, cmdName):
if self.mbox:
self.mbox.removeListener(self)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
maybeDeferred(cmbx.close).addErrback(log.err)
self.mbox = None
self.state = 'auth'
name = self._parseMbox(name)
maybeDeferred(self.account.select, self._parseMbox(name), rw
).addCallback(self._cbSelectWork, cmdName, tag
).addErrback(self._ebSelectWork, cmdName, tag
)
def _ebSelectWork(self, failure, cmdName, tag):
self.sendBadResponse(tag, "%s failed: Server error" % (cmdName,))
log.err(failure)
def _cbSelectWork(self, mbox, cmdName, tag):
if mbox is None:
self.sendNegativeResponse(tag, 'No such mailbox')
return
if '\\noselect' in [s.lower() for s in mbox.getFlags()]:
self.sendNegativeResponse(tag, 'Mailbox cannot be selected')
return
flags = mbox.getFlags()
self.sendUntaggedResponse(str(mbox.getMessageCount()) + ' EXISTS')
self.sendUntaggedResponse(str(mbox.getRecentCount()) + ' RECENT')
self.sendUntaggedResponse('FLAGS (%s)' % ' '.join(flags))
self.sendPositiveResponse(None, '[UIDVALIDITY %d]' % mbox.getUIDValidity())
s = mbox.isWriteable() and 'READ-WRITE' or 'READ-ONLY'
mbox.addListener(self)
self.sendPositiveResponse(tag, '[%s] %s successful' % (s, cmdName))
self.state = 'select'
self.mbox = mbox
auth_SELECT = ( _selectWork, arg_astring, 1, 'SELECT' )
select_SELECT = auth_SELECT
auth_EXAMINE = ( _selectWork, arg_astring, 0, 'EXAMINE' )
select_EXAMINE = auth_EXAMINE
def do_IDLE(self, tag):
self.sendContinuationRequest(None)
self.parseTag = tag
self.lastState = self.parseState
self.parseState = 'idle'
def parse_idle(self, *args):
self.parseState = self.lastState
del self.lastState
self.sendPositiveResponse(self.parseTag, "IDLE terminated")
del self.parseTag
select_IDLE = ( do_IDLE, )
auth_IDLE = select_IDLE
def do_CREATE(self, tag, name):
name = self._parseMbox(name)
try:
result = self.account.create(name)
except MailboxException, c:
self.sendNegativeResponse(tag, str(c))
except:
self.sendBadResponse(tag, "Server error encountered while creating mailbox")
log.err()
else:
if result:
self.sendPositiveResponse(tag, 'Mailbox created')
else:
self.sendNegativeResponse(tag, 'Mailbox not created')
auth_CREATE = (do_CREATE, arg_astring)
select_CREATE = auth_CREATE
def do_DELETE(self, tag, name):
name = self._parseMbox(name)
if name.lower() == 'inbox':
self.sendNegativeResponse(tag, 'You cannot delete the inbox')
return
try:
self.account.delete(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while deleting mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Mailbox deleted')
auth_DELETE = (do_DELETE, arg_astring)
select_DELETE = auth_DELETE
def do_RENAME(self, tag, oldname, newname):
oldname, newname = [self._parseMbox(n) for n in oldname, newname]
if oldname.lower() == 'inbox' or newname.lower() == 'inbox':
self.sendNegativeResponse(tag, 'You cannot rename the inbox, or rename another mailbox to inbox.')
return
try:
self.account.rename(oldname, newname)
except TypeError:
self.sendBadResponse(tag, 'Invalid command syntax')
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while renaming mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Mailbox renamed')
auth_RENAME = (do_RENAME, arg_astring, arg_astring)
select_RENAME = auth_RENAME
def do_SUBSCRIBE(self, tag, name):
name = self._parseMbox(name)
try:
self.account.subscribe(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while subscribing to mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Subscribed')
auth_SUBSCRIBE = (do_SUBSCRIBE, arg_astring)
select_SUBSCRIBE = auth_SUBSCRIBE
def do_UNSUBSCRIBE(self, tag, name):
name = self._parseMbox(name)
try:
self.account.unsubscribe(name)
except MailboxException, m:
self.sendNegativeResponse(tag, str(m))
except:
self.sendBadResponse(tag, "Server error encountered while unsubscribing from mailbox")
log.err()
else:
self.sendPositiveResponse(tag, 'Unsubscribed')
auth_UNSUBSCRIBE = (do_UNSUBSCRIBE, arg_astring)
select_UNSUBSCRIBE = auth_UNSUBSCRIBE
def _listWork(self, tag, ref, mbox, sub, cmdName):
mbox = self._parseMbox(mbox)
maybeDeferred(self.account.listMailboxes, ref, mbox
).addCallback(self._cbListWork, tag, sub, cmdName
).addErrback(self._ebListWork, tag
)
def _cbListWork(self, mailboxes, tag, sub, cmdName):
for (name, box) in mailboxes:
if not sub or self.account.isSubscribed(name):
flags = box.getFlags()
delim = box.getHierarchicalDelimiter()
resp = (DontQuoteMe(cmdName), map(DontQuoteMe, flags), delim, name.encode('imap4-utf-7'))
self.sendUntaggedResponse(collapseNestedLists(resp))
self.sendPositiveResponse(tag, '%s completed' % (cmdName,))
def _ebListWork(self, failure, tag):
self.sendBadResponse(tag, "Server error encountered while listing mailboxes.")
log.err(failure)
auth_LIST = (_listWork, arg_astring, arg_astring, 0, 'LIST')
select_LIST = auth_LIST
auth_LSUB = (_listWork, arg_astring, arg_astring, 1, 'LSUB')
select_LSUB = auth_LSUB
def do_STATUS(self, tag, mailbox, names):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox, 0
).addCallback(self._cbStatusGotMailbox, tag, mailbox, names
).addErrback(self._ebStatusGotMailbox, tag
)
def _cbStatusGotMailbox(self, mbox, tag, mailbox, names):
if mbox:
maybeDeferred(mbox.requestStatus, names).addCallbacks(
self.__cbStatus, self.__ebStatus,
(tag, mailbox), None, (tag, mailbox), None
)
else:
self.sendNegativeResponse(tag, "Could not open mailbox")
def _ebStatusGotMailbox(self, failure, tag):
self.sendBadResponse(tag, "Server error encountered while opening mailbox.")
log.err(failure)
auth_STATUS = (do_STATUS, arg_astring, arg_plist)
select_STATUS = auth_STATUS
def __cbStatus(self, status, tag, box):
line = ' '.join(['%s %s' % x for x in status.iteritems()])
self.sendUntaggedResponse('STATUS %s (%s)' % (box, line))
self.sendPositiveResponse(tag, 'STATUS complete')
def __ebStatus(self, failure, tag, box):
self.sendBadResponse(tag, 'STATUS %s failed: %s' % (box, str(failure.value)))
def do_APPEND(self, tag, mailbox, flags, date, message):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox
).addCallback(self._cbAppendGotMailbox, tag, flags, date, message
).addErrback(self._ebAppendGotMailbox, tag
)
def _cbAppendGotMailbox(self, mbox, tag, flags, date, message):
if not mbox:
self.sendNegativeResponse(tag, '[TRYCREATE] No such mailbox')
return
d = mbox.addMessage(message, flags, date)
d.addCallback(self.__cbAppend, tag, mbox)
d.addErrback(self.__ebAppend, tag)
def _ebAppendGotMailbox(self, failure, tag):
self.sendBadResponse(tag, "Server error encountered while opening mailbox.")
log.err(failure)
auth_APPEND = (do_APPEND, arg_astring, opt_plist, opt_datetime,
arg_literal)
select_APPEND = auth_APPEND
def __cbAppend(self, result, tag, mbox):
self.sendUntaggedResponse('%d EXISTS' % mbox.getMessageCount())
self.sendPositiveResponse(tag, 'APPEND complete')
def __ebAppend(self, failure, tag):
self.sendBadResponse(tag, 'APPEND failed: ' + str(failure.value))
def do_CHECK(self, tag):
d = self.checkpoint()
if d is None:
self.__cbCheck(None, tag)
else:
d.addCallbacks(
self.__cbCheck,
self.__ebCheck,
callbackArgs=(tag,),
errbackArgs=(tag,)
)
select_CHECK = (do_CHECK,)
def __cbCheck(self, result, tag):
self.sendPositiveResponse(tag, 'CHECK completed')
def __ebCheck(self, failure, tag):
self.sendBadResponse(tag, 'CHECK failed: ' + str(failure.value))
def checkpoint(self):
"""Called when the client issues a CHECK command.
This should perform any checkpoint operations required by the server.
It may be a long running operation, but may not block. If it returns
a deferred, the client will only be informed of success (or failure)
when the deferred's callback (or errback) is invoked.
"""
return None
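    # A hedged sketch of a checkpoint override in a subclass (assuming the
    # enclosing class is IMAP4Server and that a _syncToDisk helper exists;
    # both names are illustrative). Returning a deferred from a worker
    # thread keeps the reactor unblocked, as the docstring requires:
    #
    #     from twisted.internet import threads
    #
    #     class CheckpointingServer(IMAP4Server):
    #         def checkpoint(self):
    #             # Run the (hypothetical) blocking sync off the reactor
    #             # thread; the client is notified when it completes.
    #             return threads.deferToThread(self._syncToDisk)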
def do_CLOSE(self, tag):
d = None
if self.mbox.isWriteable():
d = maybeDeferred(self.mbox.expunge)
cmbx = ICloseableMailbox(self.mbox, None)
if cmbx is not None:
if d is not None:
d.addCallback(lambda result: cmbx.close())
else:
d = maybeDeferred(cmbx.close)
if d is not None:
d.addCallbacks(self.__cbClose, self.__ebClose, (tag,), None, (tag,), None)
else:
self.__cbClose(None, tag)
select_CLOSE = (do_CLOSE,)
def __cbClose(self, result, tag):
self.sendPositiveResponse(tag, 'CLOSE completed')
self.mbox.removeListener(self)
self.mbox = None
self.state = 'auth'
def __ebClose(self, failure, tag):
self.sendBadResponse(tag, 'CLOSE failed: ' + str(failure.value))
def do_EXPUNGE(self, tag):
if self.mbox.isWriteable():
maybeDeferred(self.mbox.expunge).addCallbacks(
self.__cbExpunge, self.__ebExpunge, (tag,), None, (tag,), None
)
else:
self.sendNegativeResponse(tag, 'EXPUNGE ignored on read-only mailbox')
select_EXPUNGE = (do_EXPUNGE,)
def __cbExpunge(self, result, tag):
for e in result:
self.sendUntaggedResponse('%d EXPUNGE' % e)
self.sendPositiveResponse(tag, 'EXPUNGE completed')
def __ebExpunge(self, failure, tag):
self.sendBadResponse(tag, 'EXPUNGE failed: ' + str(failure.value))
log.err(failure)
def do_SEARCH(self, tag, charset, query, uid=0):
sm = ISearchableMailbox(self.mbox, None)
if sm is not None:
maybeDeferred(sm.search, query, uid=uid).addCallbacks(
self.__cbSearch, self.__ebSearch,
(tag, self.mbox, uid), None, (tag,), None
)
else:
s = parseIdList('1:*')
maybeDeferred(self.mbox.fetch, s, uid=uid).addCallbacks(
self.__cbManualSearch, self.__ebSearch,
(tag, self.mbox, query, uid), None, (tag,), None
)
select_SEARCH = (do_SEARCH, opt_charset, arg_searchkeys)
def __cbSearch(self, result, tag, mbox, uid):
if uid:
result = map(mbox.getUID, result)
ids = ' '.join([str(i) for i in result])
self.sendUntaggedResponse('SEARCH ' + ids)
self.sendPositiveResponse(tag, 'SEARCH completed')
def __cbManualSearch(self, result, tag, mbox, query, uid, searchResults = None):
if searchResults is None:
searchResults = []
i = 0
for (i, (id, msg)) in zip(range(5), result):
if self.searchFilter(query, id, msg):
if uid:
searchResults.append(str(msg.getUID()))
else:
searchResults.append(str(id))
if i == 4:
from twisted.internet import reactor
reactor.callLater(0, self.__cbManualSearch, result, tag, mbox, query, uid, searchResults)
else:
if searchResults:
self.sendUntaggedResponse('SEARCH ' + ' '.join(searchResults))
self.sendPositiveResponse(tag, 'SEARCH completed')
def searchFilter(self, query, id, msg):
while query:
if not self.singleSearchStep(query, id, msg):
return False
return True
def singleSearchStep(self, query, id, msg):
q = query.pop(0)
if isinstance(q, list):
if not self.searchFilter(q, id, msg):
return False
else:
c = q.upper()
            f = getattr(self, 'search_' + c, None)
if f:
if not f(query, id, msg):
return False
else:
# IMAP goes *out of its way* to be complex
# Sequence sets to search should be specified
# with a command, like EVERYTHING ELSE.
try:
m = parseIdList(c)
except:
log.err('Unknown search term: ' + c)
else:
if id not in m:
return False
return True
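    # For reference, the query handled by searchFilter is the parsed token
    # list produced for the SEARCH command; each search_* method consumes
    # its own arguments destructively with query.pop(0). An illustrative
    # (hypothetical) shape:
    #
    #     # SEARCH UNSEEN SUBJECT "hello"  ->  ['UNSEEN', 'SUBJECT', 'hello']
    #     # searchFilter(['UNSEEN', 'SUBJECT', 'hello'], id, msg) is True
    #     # only if every step matches the message.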
def search_ALL(self, query, id, msg):
return True
def search_ANSWERED(self, query, id, msg):
return '\\Answered' in msg.getFlags()
def search_BCC(self, query, id, msg):
bcc = msg.getHeaders(False, 'bcc').get('bcc', '')
return bcc.lower().find(query.pop(0).lower()) != -1
def search_BEFORE(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) < date
def search_BODY(self, query, id, msg):
body = query.pop(0).lower()
return text.strFile(body, msg.getBodyFile(), False)
def search_CC(self, query, id, msg):
cc = msg.getHeaders(False, 'cc').get('cc', '')
return cc.lower().find(query.pop(0).lower()) != -1
def search_DELETED(self, query, id, msg):
return '\\Deleted' in msg.getFlags()
def search_DRAFT(self, query, id, msg):
return '\\Draft' in msg.getFlags()
def search_FLAGGED(self, query, id, msg):
return '\\Flagged' in msg.getFlags()
def search_FROM(self, query, id, msg):
fm = msg.getHeaders(False, 'from').get('from', '')
return fm.lower().find(query.pop(0).lower()) != -1
def search_HEADER(self, query, id, msg):
hdr = query.pop(0).lower()
hdr = msg.getHeaders(False, hdr).get(hdr, '')
return hdr.lower().find(query.pop(0).lower()) != -1
def search_KEYWORD(self, query, id, msg):
query.pop(0)
return False
def search_LARGER(self, query, id, msg):
return int(query.pop(0)) < msg.getSize()
def search_NEW(self, query, id, msg):
return '\\Recent' in msg.getFlags() and '\\Seen' not in msg.getFlags()
def search_NOT(self, query, id, msg):
return not self.singleSearchStep(query, id, msg)
def search_OLD(self, query, id, msg):
return '\\Recent' not in msg.getFlags()
def search_ON(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) == date
def search_OR(self, query, id, msg):
a = self.singleSearchStep(query, id, msg)
b = self.singleSearchStep(query, id, msg)
return a or b
def search_RECENT(self, query, id, msg):
return '\\Recent' in msg.getFlags()
def search_SEEN(self, query, id, msg):
return '\\Seen' in msg.getFlags()
def search_SENTBEFORE(self, query, id, msg):
        date = msg.getHeaders(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date < parseTime(query.pop(0))
def search_SENTON(self, query, id, msg):
        date = msg.getHeaders(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date[:3] == parseTime(query.pop(0))[:3]
def search_SENTSINCE(self, query, id, msg):
        date = msg.getHeaders(False, 'date').get('date', '')
date = rfc822.parsedate(date)
return date > parseTime(query.pop(0))
def search_SINCE(self, query, id, msg):
date = parseTime(query.pop(0))
return rfc822.parsedate(msg.getInternalDate()) > date
def search_SMALLER(self, query, id, msg):
return int(query.pop(0)) > msg.getSize()
def search_SUBJECT(self, query, id, msg):
subj = msg.getHeaders(False, 'subject').get('subject', '')
return subj.lower().find(query.pop(0).lower()) != -1
def search_TEXT(self, query, id, msg):
# XXX - This must search headers too
body = query.pop(0).lower()
return text.strFile(body, msg.getBodyFile(), False)
def search_TO(self, query, id, msg):
to = msg.getHeaders(False, 'to').get('to', '')
return to.lower().find(query.pop(0).lower()) != -1
def search_UID(self, query, id, msg):
c = query.pop(0)
m = parseIdList(c)
return msg.getUID() in m
def search_UNANSWERED(self, query, id, msg):
return '\\Answered' not in msg.getFlags()
def search_UNDELETED(self, query, id, msg):
return '\\Deleted' not in msg.getFlags()
def search_UNDRAFT(self, query, id, msg):
return '\\Draft' not in msg.getFlags()
def search_UNFLAGGED(self, query, id, msg):
return '\\Flagged' not in msg.getFlags()
def search_UNKEYWORD(self, query, id, msg):
query.pop(0)
return False
def search_UNSEEN(self, query, id, msg):
return '\\Seen' not in msg.getFlags()
def __ebSearch(self, failure, tag):
self.sendBadResponse(tag, 'SEARCH failed: ' + str(failure.value))
log.err(failure)
def do_FETCH(self, tag, messages, query, uid=0):
if query:
maybeDeferred(self.mbox.fetch, messages, uid=uid
).addCallback(iter
).addCallback(self.__cbFetch, tag, query, uid
).addErrback(self.__ebFetch, tag
)
else:
self.sendPositiveResponse(tag, 'FETCH complete')
select_FETCH = (do_FETCH, arg_seqset, arg_fetchatt)
def __cbFetch(self, results, tag, query, uid):
if self.blocked is None:
self.blocked = []
self._oldTimeout = self.setTimeout(None)
try:
id, msg = results.next()
except StopIteration:
# All results have been processed, deliver completion notification.
self.sendPositiveResponse(tag, 'FETCH completed')
# The idle timeout was suspended while we delivered results,
# restore it now.
self.setTimeout(self._oldTimeout)
del self._oldTimeout
# Instance state is now consistent again (ie, it is as though
# the fetch command never ran), so allow any pending blocked
# commands to execute.
self._unblock()
else:
self.spewMessage(id, msg, query, uid
).addCallback(lambda _: self.__cbFetch(results, tag, query, uid)
).addErrback(self.__ebSpewMessage
)
def __ebSpewMessage(self, failure):
# This indicates a programming error.
# There's no reliable way to indicate anything to the client, since we
# may have already written an arbitrary amount of data in response to
# the command.
log.err(failure)
self.transport.loseConnection()
def spew_envelope(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('ENVELOPE ' + collapseNestedLists([getEnvelope(msg)]))
def spew_flags(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('FLAGS ' + '(%s)' % (' '.join(msg.getFlags())))
def spew_internaldate(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
idate = msg.getInternalDate()
ttup = rfc822.parsedate_tz(idate)
if ttup is None:
log.msg("%d:%r: unpareseable internaldate: %r" % (id, msg, idate))
raise IMAP4Exception("Internal failure generating INTERNALDATE")
odate = time.strftime("%d-%b-%Y %H:%M:%S ", ttup[:9])
if ttup[9] is None:
odate = odate + "+0000"
else:
if ttup[9] >= 0:
sign = "+"
else:
sign = "-"
odate = odate + sign + string.zfill(str(((abs(ttup[9]) / 3600) * 100 + (abs(ttup[9]) % 3600) / 60)), 4)
_w('INTERNALDATE ' + _quote(odate))
def spew_rfc822header(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
hdrs = _formatHeaders(msg.getHeaders(True))
_w('RFC822.HEADER ' + _literal(hdrs))
def spew_rfc822text(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('RFC822.TEXT ')
_f()
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
def spew_rfc822size(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('RFC822.SIZE ' + str(msg.getSize()))
def spew_rfc822(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('RFC822 ')
_f()
mf = IMessageFile(msg, None)
if mf is not None:
return FileProducer(mf.open()
).beginProducing(self.transport
)
return MessageProducer(msg, None, self._scheduler
).beginProducing(self.transport
)
def spew_uid(self, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
_w('UID ' + str(msg.getUID()))
    def spew_bodystructure(self, id, msg, _w=None, _f=None):
        if _w is None:
            _w = self.transport.write
        _w('BODYSTRUCTURE ' + collapseNestedLists([getBodyStructure(msg, True)]))
def spew_body(self, part, id, msg, _w=None, _f=None):
if _w is None:
_w = self.transport.write
for p in part.part:
if msg.isMultipart():
msg = msg.getSubPart(p)
elif p > 0:
# Non-multipart messages have an implicit first part but no
# other parts - reject any request for any other part.
raise TypeError("Requested subpart of non-multipart message")
if part.header:
hdrs = msg.getHeaders(part.header.negate, *part.header.fields)
hdrs = _formatHeaders(hdrs)
_w(str(part) + ' ' + _literal(hdrs))
elif part.text:
_w(str(part) + ' ')
_f()
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
elif part.mime:
hdrs = _formatHeaders(msg.getHeaders(True))
_w(str(part) + ' ' + _literal(hdrs))
elif part.empty:
_w(str(part) + ' ')
_f()
if part.part:
return FileProducer(msg.getBodyFile()
).beginProducing(self.transport
)
else:
mf = IMessageFile(msg, None)
if mf is not None:
return FileProducer(mf.open()).beginProducing(self.transport)
return MessageProducer(msg, None, self._scheduler).beginProducing(self.transport)
else:
_w('BODY ' + collapseNestedLists([getBodyStructure(msg)]))
def spewMessage(self, id, msg, query, uid):
wbuf = WriteBuffer(self.transport)
write = wbuf.write
flush = wbuf.flush
def start():
write('* %d FETCH (' % (id,))
def finish():
write(')\r\n')
def space():
write(' ')
def spew():
seenUID = False
start()
for part in query:
if part.type == 'uid':
seenUID = True
if part.type == 'body':
yield self.spew_body(part, id, msg, write, flush)
else:
f = getattr(self, 'spew_' + part.type)
yield f(id, msg, write, flush)
if part is not query[-1]:
space()
if uid and not seenUID:
space()
yield self.spew_uid(id, msg, write, flush)
finish()
flush()
return self._scheduler(spew())
def __ebFetch(self, failure, tag):
log.err(failure)
self.sendBadResponse(tag, 'FETCH failed: ' + str(failure.value))
def do_STORE(self, tag, messages, mode, flags, uid=0):
mode = mode.upper()
silent = mode.endswith('SILENT')
if mode.startswith('+'):
mode = 1
elif mode.startswith('-'):
mode = -1
else:
mode = 0
maybeDeferred(self.mbox.store, messages, flags, mode, uid=uid).addCallbacks(
self.__cbStore, self.__ebStore, (tag, self.mbox, uid, silent), None, (tag,), None
)
select_STORE = (do_STORE, arg_seqset, arg_atom, arg_flaglist)
def __cbStore(self, result, tag, mbox, uid, silent):
if result and not silent:
for (k, v) in result.iteritems():
if uid:
uidstr = ' UID %d' % mbox.getUID(k)
else:
uidstr = ''
self.sendUntaggedResponse('%d FETCH (FLAGS (%s)%s)' %
(k, ' '.join(v), uidstr))
self.sendPositiveResponse(tag, 'STORE completed')
def __ebStore(self, failure, tag):
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
def do_COPY(self, tag, messages, mailbox, uid=0):
mailbox = self._parseMbox(mailbox)
maybeDeferred(self.account.select, mailbox
).addCallback(self._cbCopySelectedMailbox, tag, messages, mailbox, uid
).addErrback(self._ebCopySelectedMailbox, tag
)
select_COPY = (do_COPY, arg_seqset, arg_astring)
def _cbCopySelectedMailbox(self, mbox, tag, messages, mailbox, uid):
if not mbox:
self.sendNegativeResponse(tag, 'No such mailbox: ' + mailbox)
else:
maybeDeferred(self.mbox.fetch, messages, uid
).addCallback(self.__cbCopy, tag, mbox
).addCallback(self.__cbCopied, tag, mbox
).addErrback(self.__ebCopy, tag
)
def _ebCopySelectedMailbox(self, failure, tag):
self.sendBadResponse(tag, 'Server error: ' + str(failure.value))
def __cbCopy(self, messages, tag, mbox):
# XXX - This should handle failures with a rollback or something
addedDeferreds = []
addedIDs = []
failures = []
fastCopyMbox = IMessageCopier(mbox, None)
for (id, msg) in messages:
if fastCopyMbox is not None:
d = maybeDeferred(fastCopyMbox.copy, msg)
addedDeferreds.append(d)
continue
# XXX - The following should be an implementation of IMessageCopier.copy
# on an IMailbox->IMessageCopier adapter.
flags = msg.getFlags()
date = msg.getInternalDate()
body = IMessageFile(msg, None)
if body is not None:
bodyFile = body.open()
d = maybeDeferred(mbox.addMessage, bodyFile, flags, date)
else:
def rewind(f):
f.seek(0)
return f
buffer = tempfile.TemporaryFile()
d = MessageProducer(msg, buffer, self._scheduler
).beginProducing(None
).addCallback(lambda _, b=buffer, f=flags, d=date: mbox.addMessage(rewind(b), f, d)
)
addedDeferreds.append(d)
return defer.DeferredList(addedDeferreds)
def __cbCopied(self, deferredIds, tag, mbox):
ids = []
failures = []
for (status, result) in deferredIds:
if status:
ids.append(result)
else:
failures.append(result.value)
if failures:
self.sendNegativeResponse(tag, '[ALERT] Some messages were not copied')
else:
self.sendPositiveResponse(tag, 'COPY completed')
def __ebCopy(self, failure, tag):
        self.sendBadResponse(tag, 'COPY failed: ' + str(failure.value))
log.err(failure)
def do_UID(self, tag, command, line):
command = command.upper()
if command not in ('COPY', 'FETCH', 'STORE', 'SEARCH'):
raise IllegalClientResponse(command)
self.dispatchCommand(tag, command, line, uid=1)
select_UID = (do_UID, arg_atom, arg_line)
#
# IMailboxListener implementation
#
def modeChanged(self, writeable):
if writeable:
self.sendUntaggedResponse(message='[READ-WRITE]', async=True)
else:
self.sendUntaggedResponse(message='[READ-ONLY]', async=True)
def flagsChanged(self, newFlags):
for (mId, flags) in newFlags.iteritems():
msg = '%d FETCH (FLAGS (%s))' % (mId, ' '.join(flags))
self.sendUntaggedResponse(msg, async=True)
def newMessages(self, exists, recent):
if exists is not None:
self.sendUntaggedResponse('%d EXISTS' % exists, async=True)
if recent is not None:
self.sendUntaggedResponse('%d RECENT' % recent, async=True)
class UnhandledResponse(IMAP4Exception): pass
class NegativeResponse(IMAP4Exception): pass
class NoSupportedAuthentication(IMAP4Exception):
def __init__(self, serverSupports, clientSupports):
IMAP4Exception.__init__(self, 'No supported authentication schemes available')
self.serverSupports = serverSupports
self.clientSupports = clientSupports
def __str__(self):
return (IMAP4Exception.__str__(self)
+ ': Server supports %r, client supports %r'
% (self.serverSupports, self.clientSupports))
class IllegalServerResponse(IMAP4Exception): pass
TIMEOUT_ERROR = error.TimeoutError()
class IMAP4Client(basic.LineReceiver, policies.TimeoutMixin):
"""IMAP4 client protocol implementation
@ivar state: A string representing the state the connection is currently
in.
"""
implements(IMailboxListener)
tags = None
waiting = None
queued = None
tagID = 1
state = None
startedTLS = False
# Number of seconds to wait before timing out a connection.
# If the number is <= 0 no timeout checking will be performed.
timeout = 0
# Capabilities are not allowed to change during the session
# So cache the first response and use that for all later
# lookups
_capCache = None
_memoryFileLimit = 1024 * 1024 * 10
# Authentication is pluggable. This maps names to IClientAuthentication
# objects.
authenticators = None
STATUS_CODES = ('OK', 'NO', 'BAD', 'PREAUTH', 'BYE')
STATUS_TRANSFORMATIONS = {
'MESSAGES': int, 'RECENT': int, 'UNSEEN': int
}
context = None
def __init__(self, contextFactory = None):
self.tags = {}
self.queued = []
self.authenticators = {}
self.context = contextFactory
self._tag = None
self._parts = None
self._lastCmd = None
def registerAuthenticator(self, auth):
"""Register a new form of authentication
When invoking the authenticate() method of IMAP4Client, the first
matching authentication scheme found will be used. The ordering is
        that in which the server lists supported authentication schemes.
@type auth: Implementor of C{IClientAuthentication}
@param auth: The object to use to perform the client
side of this authentication scheme.
"""
self.authenticators[auth.getName().upper()] = auth
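    # A minimal usage sketch; CramMD5ClientAuthenticator is defined
    # elsewhere in this module, while the credentials and the connected
    # 'proto' instance are assumptions:
    #
    #     proto.registerAuthenticator(CramMD5ClientAuthenticator('alice'))
    #     d = proto.authenticate('s3cret')
    #     d.addCallback(lambda _: proto.select('INBOX'))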
def rawDataReceived(self, data):
if self.timeout > 0:
self.resetTimeout()
self._pendingSize -= len(data)
if self._pendingSize > 0:
self._pendingBuffer.write(data)
else:
passon = ''
if self._pendingSize < 0:
data, passon = data[:self._pendingSize], data[self._pendingSize:]
self._pendingBuffer.write(data)
rest = self._pendingBuffer
self._pendingBuffer = None
self._pendingSize = None
rest.seek(0, 0)
self._parts.append(rest.read())
self.setLineMode(passon.lstrip('\r\n'))
# def sendLine(self, line):
# print 'S:', repr(line)
# return basic.LineReceiver.sendLine(self, line)
def _setupForLiteral(self, rest, octets):
self._pendingBuffer = self.messageFile(octets)
self._pendingSize = octets
if self._parts is None:
self._parts = [rest, '\r\n']
else:
self._parts.extend([rest, '\r\n'])
self.setRawMode()
def connectionMade(self):
if self.timeout > 0:
self.setTimeout(self.timeout)
def connectionLost(self, reason):
"""We are no longer connected"""
if self.timeout > 0:
self.setTimeout(None)
if self.queued is not None:
queued = self.queued
self.queued = None
for cmd in queued:
cmd.defer.errback(reason)
if self.tags is not None:
tags = self.tags
self.tags = None
for cmd in tags.itervalues():
if cmd is not None and cmd.defer is not None:
cmd.defer.errback(reason)
def lineReceived(self, line):
# print 'C: ' + repr(line)
if self.timeout > 0:
self.resetTimeout()
lastPart = line.rfind(' ')
if lastPart != -1:
lastPart = line[lastPart + 1:]
if lastPart.startswith('{') and lastPart.endswith('}'):
# It's a literal a-comin' in
try:
octets = int(lastPart[1:-1])
except ValueError:
raise IllegalServerResponse(line)
if self._parts is None:
self._tag, parts = line.split(None, 1)
else:
parts = line
self._setupForLiteral(parts, octets)
return
if self._parts is None:
# It isn't a literal at all
self._regularDispatch(line)
else:
# If an expression is in progress, no tag is required here
# Since we didn't find a literal indicator, this expression
# is done.
self._parts.append(line)
tag, rest = self._tag, ''.join(self._parts)
self._tag = self._parts = None
self.dispatchCommand(tag, rest)
def timeoutConnection(self):
if self._lastCmd and self._lastCmd.defer is not None:
d, self._lastCmd.defer = self._lastCmd.defer, None
d.errback(TIMEOUT_ERROR)
if self.queued:
for cmd in self.queued:
if cmd.defer is not None:
                    d, cmd.defer = cmd.defer, None
d.errback(TIMEOUT_ERROR)
self.transport.loseConnection()
def _regularDispatch(self, line):
parts = line.split(None, 1)
if len(parts) != 2:
parts.append('')
tag, rest = parts
self.dispatchCommand(tag, rest)
def messageFile(self, octets):
"""Create a file to which an incoming message may be written.
@type octets: C{int}
@param octets: The number of octets which will be written to the file
@rtype: Any object which implements C{write(string)} and
C{seek(int, int)}
@return: A file-like object
"""
if octets > self._memoryFileLimit:
return tempfile.TemporaryFile()
else:
return StringIO.StringIO()
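    # Overriding messageFile is the supported hook for changing this
    # buffering policy. A hedged sketch that always spools to disk,
    # regardless of size:
    #
    #     class DiskBufferingIMAP4Client(IMAP4Client):
    #         def messageFile(self, octets):
    #             return tempfile.TemporaryFile()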
def makeTag(self):
tag = '%0.4X' % self.tagID
self.tagID += 1
return tag
def dispatchCommand(self, tag, rest):
if self.state is None:
f = self.response_UNAUTH
else:
f = getattr(self, 'response_' + self.state.upper(), None)
if f:
try:
f(tag, rest)
except:
log.err()
self.transport.loseConnection()
else:
log.err("Cannot dispatch: %s, %s, %s" % (self.state, tag, rest))
self.transport.loseConnection()
def response_UNAUTH(self, tag, rest):
if self.state is None:
            # This is the server greeting; no tag precedes it.
status, rest = rest.split(None, 1)
if status.upper() == 'OK':
self.state = 'unauth'
elif status.upper() == 'PREAUTH':
self.state = 'auth'
else:
# XXX - This is rude.
self.transport.loseConnection()
raise IllegalServerResponse(tag + ' ' + rest)
b, e = rest.find('['), rest.find(']')
if b != -1 and e != -1:
self.serverGreeting(self.__cbCapabilities(([rest[b:e]], None)))
else:
self.serverGreeting(None)
else:
self._defaultHandler(tag, rest)
def response_AUTH(self, tag, rest):
self._defaultHandler(tag, rest)
def _defaultHandler(self, tag, rest):
if tag == '*' or tag == '+':
if not self.waiting:
self._extraInfo([rest])
else:
cmd = self.tags[self.waiting]
if tag == '+':
cmd.continuation(rest)
else:
cmd.lines.append(rest)
else:
try:
cmd = self.tags[tag]
except KeyError:
# XXX - This is rude.
self.transport.loseConnection()
raise IllegalServerResponse(tag + ' ' + rest)
else:
status, line = rest.split(None, 1)
if status == 'OK':
# Give them this last line, too
cmd.finish(rest, self._extraInfo)
else:
cmd.defer.errback(IMAP4Exception(line))
del self.tags[tag]
self.waiting = None
self._flushQueue()
def _flushQueue(self):
if self.queued:
cmd = self.queued.pop(0)
t = self.makeTag()
self.tags[t] = cmd
self.sendLine(cmd.format(t))
self.waiting = t
def _extraInfo(self, lines):
# XXX - This is terrible.
# XXX - Also, this should collapse temporally proximate calls into single
# invocations of IMailboxListener methods, where possible.
flags = {}
recent = exists = None
for L in lines:
if L.find('EXISTS') != -1:
exists = int(L.split()[0])
elif L.find('RECENT') != -1:
recent = int(L.split()[0])
elif L.find('READ-ONLY') != -1:
self.modeChanged(0)
elif L.find('READ-WRITE') != -1:
self.modeChanged(1)
elif L.find('FETCH') != -1:
for (mId, fetched) in self.__cbFetch(([L], None)).iteritems():
sum = []
for f in fetched.get('FLAGS', []):
sum.append(f)
flags.setdefault(mId, []).extend(sum)
else:
log.msg('Unhandled unsolicited response: ' + repr(L))
if flags:
self.flagsChanged(flags)
if recent is not None or exists is not None:
self.newMessages(exists, recent)
def sendCommand(self, cmd):
cmd.defer = defer.Deferred()
if self.waiting:
self.queued.append(cmd)
return cmd.defer
t = self.makeTag()
self.tags[t] = cmd
self.sendLine(cmd.format(t))
self.waiting = t
self._lastCmd = cmd
return cmd.defer
def getCapabilities(self, useCache=1):
"""Request the capabilities available on this server.
This command is allowed in any state of connection.
@type useCache: C{bool}
@param useCache: Specify whether to use the capability-cache or to
re-retrieve the capabilities from the server. Server capabilities
should never change, so for normal use, this flag should never be
false.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a
dictionary mapping capability types to lists of supported
mechanisms, or to None if a support list is not applicable.
"""
if useCache and self._capCache is not None:
return defer.succeed(self._capCache)
cmd = 'CAPABILITY'
resp = ('CAPABILITY',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbCapabilities)
return d
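    # Example of consuming the capability dictionary (a sketch; 'proto' is
    # assumed to be a connected instance of this class):
    #
    #     d = proto.getCapabilities()
    #     d.addCallback(lambda caps: 'IDLE' in caps)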
def __cbCapabilities(self, (lines, tagline)):
caps = {}
for rest in lines:
rest = rest.split()[1:]
for cap in rest:
eq = cap.find('=')
if eq == -1:
caps[cap] = None
else:
caps.setdefault(cap[:eq], []).append(cap[eq+1:])
self._capCache = caps
return caps
def logout(self):
"""Inform the server that we are done with the connection.
This command is allowed in any state of connection.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with None
when the proper server acknowledgement has been received.
"""
d = self.sendCommand(Command('LOGOUT', wantResponse=('BYE',)))
d.addCallback(self.__cbLogout)
return d
def __cbLogout(self, (lines, tagline)):
self.transport.loseConnection()
# We don't particularly care what the server said
return None
def noop(self):
"""Perform no operation.
This command is allowed in any state of connection.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a list
of untagged status updates the server responds with.
"""
d = self.sendCommand(Command('NOOP'))
d.addCallback(self.__cbNoop)
return d
def __cbNoop(self, (lines, tagline)):
        # Conceivably, this is elidable.
        # It is, after all, a no-op.
return lines
def startTLS(self, contextFactory=None):
"""
Initiates a 'STARTTLS' request and negotiates the TLS / SSL
Handshake.
@param contextFactory: The TLS / SSL Context Factory to
leverage. If the contextFactory is None the IMAP4Client will
either use the current TLS / SSL Context Factory or attempt to
create a new one.
@type contextFactory: C{ssl.ClientContextFactory}
@return: A Deferred which fires when the transport has been
secured according to the given contextFactory, or which fails
if the transport cannot be secured.
"""
assert not self.startedTLS, "Client and Server are currently communicating via TLS"
if contextFactory is None:
contextFactory = self._getContextFactory()
if contextFactory is None:
return defer.fail(IMAP4Exception(
"IMAP4Client requires a TLS context to "
"initiate the STARTTLS handshake"))
if 'STARTTLS' not in self._capCache:
return defer.fail(IMAP4Exception(
"Server does not support secure communication "
"via TLS / SSL"))
tls = interfaces.ITLSTransport(self.transport, None)
if tls is None:
return defer.fail(IMAP4Exception(
"IMAP4Client transport does not implement "
"interfaces.ITLSTransport"))
d = self.sendCommand(Command('STARTTLS'))
d.addCallback(self._startedTLS, contextFactory)
d.addCallback(lambda _: self.getCapabilities())
return d
def authenticate(self, secret):
"""Attempt to enter the authenticated state with the server
This command is allowed in the Non-Authenticated state.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the authentication
succeeds and whose errback will be invoked otherwise.
"""
if self._capCache is None:
d = self.getCapabilities()
else:
d = defer.succeed(self._capCache)
d.addCallback(self.__cbAuthenticate, secret)
return d
def __cbAuthenticate(self, caps, secret):
auths = caps.get('AUTH', ())
for scheme in auths:
if scheme.upper() in self.authenticators:
cmd = Command('AUTHENTICATE', scheme, (),
self.__cbContinueAuth, scheme,
secret)
return self.sendCommand(cmd)
if self.startedTLS:
return defer.fail(NoSupportedAuthentication(
auths, self.authenticators.keys()))
else:
def ebStartTLS(err):
err.trap(IMAP4Exception)
# We couldn't negotiate TLS for some reason
return defer.fail(NoSupportedAuthentication(
auths, self.authenticators.keys()))
d = self.startTLS()
d.addErrback(ebStartTLS)
d.addCallback(lambda _: self.getCapabilities())
d.addCallback(self.__cbAuthTLS, secret)
return d
def __cbContinueAuth(self, rest, scheme, secret):
try:
chal = base64.decodestring(rest + '\n')
except binascii.Error:
            self.sendLine('*')
            # Drop the connection before raising; a statement after a
            # raise is unreachable.
            self.transport.loseConnection()
            raise IllegalServerResponse(rest)
else:
auth = self.authenticators[scheme]
chal = auth.challengeResponse(secret, chal)
self.sendLine(base64.encodestring(chal).strip())
def __cbAuthTLS(self, caps, secret):
auths = caps.get('AUTH', ())
for scheme in auths:
if scheme.upper() in self.authenticators:
cmd = Command('AUTHENTICATE', scheme, (),
self.__cbContinueAuth, scheme,
secret)
return self.sendCommand(cmd)
raise NoSupportedAuthentication(auths, self.authenticators.keys())
def login(self, username, password):
"""Authenticate with the server using a username and password
This command is allowed in the Non-Authenticated state. If the
server supports the STARTTLS capability and our transport supports
TLS, TLS is negotiated before the login command is issued.
A more secure way to log in is to use C{startTLS} or
C{authenticate} or both.
@type username: C{str}
@param username: The username to log in with
@type password: C{str}
@param password: The password to log in with
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if login is successful
and whose errback is invoked otherwise.
"""
d = maybeDeferred(self.getCapabilities)
d.addCallback(self.__cbLoginCaps, username, password)
return d
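    # A hedged usage sketch; 'proto' is assumed to be a connected instance
    # and the credentials are placeholders:
    #
    #     d = proto.login('alice', 's3cret')
    #     d.addCallback(lambda _: proto.select('INBOX'))
    #     d.addErrback(log.err)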
def serverGreeting(self, caps):
"""Called when the server has sent us a greeting.
@type caps: C{dict}
@param caps: Capabilities the server advertised in its greeting.
"""
def _getContextFactory(self):
if self.context is not None:
return self.context
try:
from twisted.internet import ssl
except ImportError:
return None
else:
context = ssl.ClientContextFactory()
context.method = ssl.SSL.TLSv1_METHOD
return context
def __cbLoginCaps(self, capabilities, username, password):
# If the server advertises STARTTLS, we might want to try to switch to TLS
tryTLS = 'STARTTLS' in capabilities
# If our transport supports switching to TLS, we might want to try to switch to TLS.
tlsableTransport = interfaces.ITLSTransport(self.transport, None) is not None
# If our transport is not already using TLS, we might want to try to switch to TLS.
nontlsTransport = interfaces.ISSLTransport(self.transport, None) is None
if not self.startedTLS and tryTLS and tlsableTransport and nontlsTransport:
d = self.startTLS()
d.addCallbacks(
self.__cbLoginTLS,
self.__ebLoginTLS,
callbackArgs=(username, password),
)
return d
else:
if nontlsTransport:
log.msg("Server has no TLS support. logging in over cleartext!")
args = ' '.join((_quote(username), _quote(password)))
return self.sendCommand(Command('LOGIN', args))
def _startedTLS(self, result, context):
self.transport.startTLS(context)
self._capCache = None
self.startedTLS = True
return result
def __cbLoginTLS(self, result, username, password):
args = ' '.join((_quote(username), _quote(password)))
return self.sendCommand(Command('LOGIN', args))
def __ebLoginTLS(self, failure):
log.err(failure)
return failure
def namespace(self):
"""Retrieve information about the namespaces available to this account
This command is allowed in the Authenticated and Selected states.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with namespace
information. An example of this information is::
[[['', '/']], [], []]
which indicates a single personal namespace called '' with '/'
as its hierarchical delimiter, and no shared or user namespaces.
"""
cmd = 'NAMESPACE'
resp = ('NAMESPACE',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbNamespace)
return d
def __cbNamespace(self, (lines, last)):
for line in lines:
parts = line.split(None, 1)
if len(parts) == 2:
if parts[0] == 'NAMESPACE':
# XXX UGGG parsing hack :(
r = parseNestedParens('(' + parts[1] + ')')[0]
return [e or [] for e in r]
log.err("No NAMESPACE response to NAMESPACE command")
return [[], [], []]
def select(self, mailbox):
"""Select a mailbox
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to select
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with mailbox
information if the select is successful and whose errback is
invoked otherwise. Mailbox information consists of a dictionary
with the following keys and values::
FLAGS: A list of strings containing the flags settable on
messages in this mailbox.
EXISTS: An integer indicating the number of messages in this
mailbox.
RECENT: An integer indicating the number of \"recent\"
messages in this mailbox.
UNSEEN: An integer indicating the number of messages not
flagged \\Seen in this mailbox.
PERMANENTFLAGS: A list of strings containing the flags that
can be permanently set on messages in this mailbox.
UIDVALIDITY: An integer uniquely identifying this mailbox.
"""
cmd = 'SELECT'
args = _prepareMailboxName(mailbox)
resp = ('FLAGS', 'EXISTS', 'RECENT', 'UNSEEN', 'PERMANENTFLAGS', 'UIDVALIDITY')
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbSelect, 1)
return d
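    # Example of reading the mailbox information described above (a sketch;
    # 'proto' is assumed to be connected and authenticated). UNSEEN may be
    # absent, so it is read defensively:
    #
    #     d = proto.select('INBOX')
    #     d.addCallback(lambda info: (info['EXISTS'], info.get('UNSEEN', 0)))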
def examine(self, mailbox):
"""Select a mailbox in read-only mode
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to examine
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with mailbox
information if the examine is successful and whose errback
is invoked otherwise. Mailbox information consists of a dictionary
with the following keys and values::
'FLAGS': A list of strings containing the flags settable on
messages in this mailbox.
'EXISTS': An integer indicating the number of messages in this
mailbox.
'RECENT': An integer indicating the number of \"recent\"
messages in this mailbox.
'UNSEEN': An integer indicating the number of messages not
flagged \\Seen in this mailbox.
'PERMANENTFLAGS': A list of strings containing the flags that
can be permanently set on messages in this mailbox.
'UIDVALIDITY': An integer uniquely identifying this mailbox.
"""
cmd = 'EXAMINE'
args = _prepareMailboxName(mailbox)
resp = ('FLAGS', 'EXISTS', 'RECENT', 'UNSEEN', 'PERMANENTFLAGS', 'UIDVALIDITY')
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbSelect, 0)
return d
def __cbSelect(self, (lines, tagline), rw):
        # In the absence of specification, we are free to assume:
# READ-WRITE access
datum = {'READ-WRITE': rw}
lines.append(tagline)
for parts in lines:
split = parts.split()
if len(split) == 2:
if split[1].upper().strip() == 'EXISTS':
try:
datum['EXISTS'] = int(split[0])
except ValueError:
raise IllegalServerResponse(parts)
elif split[1].upper().strip() == 'RECENT':
try:
datum['RECENT'] = int(split[0])
except ValueError:
raise IllegalServerResponse(parts)
else:
log.err('Unhandled SELECT response (1): ' + parts)
elif split[0].upper().strip() == 'FLAGS':
split = parts.split(None, 1)
datum['FLAGS'] = tuple(parseNestedParens(split[1])[0])
elif split[0].upper().strip() == 'OK':
begin = parts.find('[')
end = parts.find(']')
if begin == -1 or end == -1:
raise IllegalServerResponse(parts)
else:
content = parts[begin+1:end].split(None, 1)
if len(content) >= 1:
key = content[0].upper()
if key == 'READ-ONLY':
datum['READ-WRITE'] = 0
elif key == 'READ-WRITE':
datum['READ-WRITE'] = 1
elif key == 'UIDVALIDITY':
try:
datum['UIDVALIDITY'] = int(content[1])
except ValueError:
raise IllegalServerResponse(parts)
elif key == 'UNSEEN':
try:
datum['UNSEEN'] = int(content[1])
except ValueError:
raise IllegalServerResponse(parts)
elif key == 'UIDNEXT':
datum['UIDNEXT'] = int(content[1])
elif key == 'PERMANENTFLAGS':
datum['PERMANENTFLAGS'] = tuple(parseNestedParens(content[1])[0])
else:
log.err('Unhandled SELECT response (2): ' + parts)
else:
log.err('Unhandled SELECT response (3): ' + parts)
else:
log.err('Unhandled SELECT response (4): ' + parts)
return datum
def create(self, name):
"""Create a new mailbox on the server
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The name of the mailbox to create.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the mailbox creation
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('CREATE', _prepareMailboxName(name)))
def delete(self, name):
"""Delete a mailbox
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The name of the mailbox to delete.
@rtype: C{Deferred}
        @return: A deferred whose callback is invoked if the mailbox is
deleted successfully and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('DELETE', _prepareMailboxName(name)))
def rename(self, oldname, newname):
"""Rename a mailbox
This command is allowed in the Authenticated and Selected states.
@type oldname: C{str}
@param oldname: The current name of the mailbox to rename.
@type newname: C{str}
@param newname: The new name to give the mailbox.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the rename is
successful and whose errback is invoked otherwise.
"""
oldname = _prepareMailboxName(oldname)
newname = _prepareMailboxName(newname)
return self.sendCommand(Command('RENAME', ' '.join((oldname, newname))))
def subscribe(self, name):
"""Add a mailbox to the subscription list
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The mailbox to mark as 'active' or 'subscribed'
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the subscription
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('SUBSCRIBE', _prepareMailboxName(name)))
def unsubscribe(self, name):
"""Remove a mailbox from the subscription list
This command is allowed in the Authenticated and Selected states.
@type name: C{str}
@param name: The mailbox to unsubscribe
@rtype: C{Deferred}
@return: A deferred whose callback is invoked if the unsubscription
is successful and whose errback is invoked otherwise.
"""
return self.sendCommand(Command('UNSUBSCRIBE', _prepareMailboxName(name)))
def list(self, reference, wildcard):
"""List a subset of the available mailboxes
This command is allowed in the Authenticated and Selected states.
@type reference: C{str}
@param reference: The context in which to interpret C{wildcard}
@type wildcard: C{str}
@param wildcard: The pattern of mailbox names to match, optionally
including either or both of the '*' and '%' wildcards. '*' will
match zero or more characters and cross hierarchical boundaries.
'%' will also match zero or more characters, but is limited to a
single hierarchical level.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of C{tuple}s,
the first element of which is a C{tuple} of mailbox flags, the second
element of which is the hierarchy delimiter for this mailbox, and the
third of which is the mailbox name; if the command is unsuccessful,
the deferred's errback is invoked instead.
"""
cmd = 'LIST'
args = '"%s" "%s"' % (reference, wildcard.encode('imap4-utf-7'))
resp = ('LIST',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbList, 'LIST')
return d
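    # Example of walking the (flags, delimiter, name) tuples documented
    # above (a sketch; 'proto' is assumed to be connected and
    # authenticated):
    #
    #     def names(results):
    #         return [name for (flags, delim, name) in results]
    #     proto.list('', '*').addCallback(names)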
def lsub(self, reference, wildcard):
"""List a subset of the subscribed available mailboxes
This command is allowed in the Authenticated and Selected states.
The parameters and returned object are the same as for the C{list}
method, with one slight difference: Only mailboxes which have been
subscribed can be included in the resulting list.
"""
cmd = 'LSUB'
args = '"%s" "%s"' % (reference, wildcard.encode('imap4-utf-7'))
resp = ('LSUB',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbList, 'LSUB')
return d
def __cbList(self, (lines, last), command):
results = []
for L in lines:
parts = parseNestedParens(L)
if len(parts) != 4:
raise IllegalServerResponse, L
if parts[0] == command:
parts[1] = tuple(parts[1])
results.append(tuple(parts[1:]))
return results
def status(self, mailbox, *names):
"""Retrieve the status of the given mailbox
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The name of the mailbox to query
@type names: C{str}
@param names: The status names to query. These may be any number of:
MESSAGES, RECENT, UIDNEXT, UIDVALIDITY, and UNSEEN.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with the status information
if the command is successful and whose errback is invoked otherwise.
"""
cmd = 'STATUS'
args = "%s (%s)" % (_prepareMailboxName(mailbox), ' '.join(names))
resp = ('STATUS',)
d = self.sendCommand(Command(cmd, args, wantResponse=resp))
d.addCallback(self.__cbStatus)
return d
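    # Example (a sketch): the callback receives a dict keyed by the
    # requested status names; MESSAGES, RECENT, and UNSEEN arrive as
    # integers via STATUS_TRANSFORMATIONS:
    #
    #     d = proto.status('INBOX', 'MESSAGES', 'UNSEEN')
    #     d.addCallback(lambda s: (s['MESSAGES'], s['UNSEEN']))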
def __cbStatus(self, (lines, last)):
status = {}
for line in lines:
parts = parseNestedParens(line)
if parts[0] == 'STATUS':
items = parts[2]
items = [items[i:i+2] for i in range(0, len(items), 2)]
status.update(dict(items))
for k in status.keys():
t = self.STATUS_TRANSFORMATIONS.get(k)
if t:
try:
status[k] = t(status[k])
except Exception, e:
raise IllegalServerResponse('(%s %s): %s' % (k, status[k], str(e)))
return status
def append(self, mailbox, message, flags = (), date = None):
"""Add the given message to the given mailbox.
This command is allowed in the Authenticated and Selected states.
@type mailbox: C{str}
@param mailbox: The mailbox to which to add this message.
@type message: Any file-like object
@param message: The message to add, in RFC822 format. Newlines
in this file should be \\r\\n-style.
@type flags: Any iterable of C{str}
@param flags: The flags to associated with this message.
@type date: C{str}
@param date: The date to associate with this message. This should
be of the format DD-MM-YYYY HH:MM:SS +/-HHMM. For example, in
Eastern Standard Time, on July 1st 2004 at half past 1 PM,
\"01-07-2004 13:30:00 -0500\".
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when this command
succeeds or whose errback is invoked if it fails.
"""
message.seek(0, 2)
L = message.tell()
message.seek(0, 0)
fmt = '%s (%s)%s {%d}'
if date:
date = ' "%s"' % date
else:
date = ''
cmd = fmt % (
_prepareMailboxName(mailbox), ' '.join(flags),
date, L
)
d = self.sendCommand(Command('APPEND', cmd, (), self.__cbContinueAppend, message))
return d
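    # A hedged usage sketch; note the message must be a file-like object
    # with CRLF line endings, per the docstring above:
    #
    #     msg = StringIO.StringIO('Subject: hi\r\n\r\nbody\r\n')
    #     d = proto.append('INBOX', msg, flags=('\\Seen',))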
def __cbContinueAppend(self, lines, message):
s = basic.FileSender()
return s.beginFileTransfer(message, self.transport, None
).addCallback(self.__cbFinishAppend)
def __cbFinishAppend(self, foo):
self.sendLine('')
def check(self):
"""Tell the server to perform a checkpoint
This command is allowed in the Selected state.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when this command
succeeds or whose errback is invoked if it fails.
"""
return self.sendCommand(Command('CHECK'))
def close(self):
"""Return the connection to the Authenticated state.
This command is allowed in the Selected state.
Issuing this command will also remove all messages flagged \\Deleted
from the selected mailbox if it is opened in read-write mode,
        otherwise it indicates success but no messages are removed.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked when the command
completes successfully or whose errback is invoked if it fails.
"""
return self.sendCommand(Command('CLOSE'))
def expunge(self):
"""Return the connection to the Authenticate state.
This command is allowed in the Selected state.
Issuing this command will perform the same actions as issuing the
close command, but will also generate an 'expunge' response for
every message deleted.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a list of the
'expunge' responses when this command is successful or whose errback
is invoked otherwise.
"""
cmd = 'EXPUNGE'
resp = ('EXPUNGE',)
d = self.sendCommand(Command(cmd, wantResponse=resp))
d.addCallback(self.__cbExpunge)
return d
def __cbExpunge(self, (lines, last)):
ids = []
for line in lines:
parts = line.split(None, 1)
if len(parts) == 2:
if parts[1] == 'EXPUNGE':
try:
ids.append(int(parts[0]))
except ValueError:
raise IllegalServerResponse, line
return ids
def search(self, *queries, **kwarg):
"""Search messages in the currently selected mailbox
This command is allowed in the Selected state.
Any non-zero number of queries are accepted by this method, as
returned by the C{Query}, C{Or}, and C{Not} functions.
One keyword argument is accepted: if uid is passed in with a non-zero
value, the server is asked to return message UIDs instead of message
sequence numbers.
@rtype: C{Deferred}
@return: A deferred whose callback will be invoked with a list of all
        the message sequence numbers returned by the search, or whose errback
will be invoked if there is an error.
"""
if kwarg.get('uid'):
cmd = 'UID SEARCH'
else:
cmd = 'SEARCH'
args = ' '.join(queries)
d = self.sendCommand(Command(cmd, args, wantResponse=(cmd,)))
d.addCallback(self.__cbSearch)
return d
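    # Example using the Query helper defined elsewhere in this module (a
    # sketch; 'proto' is assumed to have a mailbox selected):
    #
    #     d = proto.search(Query(unseen=1, subject='meeting'))
    #     d.addCallback(lambda ids: ids[:10])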
def __cbSearch(self, (lines, end)):
ids = []
for line in lines:
parts = line.split(None, 1)
if len(parts) == 2:
if parts[0] == 'SEARCH':
try:
ids.extend(map(int, parts[1].split()))
except ValueError:
raise IllegalServerResponse, line
return ids
def fetchUID(self, messages, uid=0):
"""Retrieve the unique identifier for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message sequence numbers to unique message identifiers, or whose
errback is invoked if there is an error.
"""
d = self._fetch(messages, useUID=uid, uid=1)
d.addCallback(self.__cbFetch)
return d
def fetchFlags(self, messages, uid=0):
"""Retrieve the flags for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve flags.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to lists of flags, or whose errback is invoked if
there is an error.
"""
d = self._fetch(str(messages), useUID=uid, flags=1)
d.addCallback(self.__cbFetch)
return d
def fetchInternalDate(self, messages, uid=0):
"""Retrieve the internal date associated with one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve the internal date.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to date strings, or whose errback is invoked
if there is an error. Date strings take the format of
\"day-month-year time timezone\".
"""
d = self._fetch(str(messages), useUID=uid, internaldate=1)
d.addCallback(self.__cbFetch)
return d
def fetchEnvelope(self, messages, uid=0):
"""Retrieve the envelope data for one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve envelope data.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to envelope data, or whose errback is invoked
if there is an error. Envelope data consists of a sequence of the
date, subject, from, sender, reply-to, to, cc, bcc, in-reply-to,
and message-id header fields. The date, subject, in-reply-to, and
message-id fields are strings, while the from, sender, reply-to,
to, cc, and bcc fields contain address data. Address data consists
of a sequence of name, source route, mailbox name, and hostname.
Fields which are not present for a particular address may be C{None}.
"""
d = self._fetch(str(messages), useUID=uid, envelope=1)
d.addCallback(self.__cbFetch)
return d
def fetchBodyStructure(self, messages, uid=0):
"""Retrieve the structure of the body of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: The messages for which to retrieve body structure
data.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to body structure data, or whose errback is invoked
if there is an error. Body structure data describes the MIME-IMB
format of a message and consists of a sequence of mime type, mime
subtype, parameters, content id, description, encoding, and size.
The fields following the size field are variable: if the mime
type/subtype is message/rfc822, the contained message's envelope
information, body structure data, and number of lines of text; if
the mime type is text, the number of lines of text. Extension fields
may also be included; if present, they are: the MD5 hash of the body,
body disposition, body language.
"""
d = self._fetch(messages, useUID=uid, bodystructure=1)
d.addCallback(self.__cbFetch)
return d
def fetchSimplifiedBody(self, messages, uid=0):
"""Retrieve the simplified body structure of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to body data, or whose errback is invoked
if there is an error. The simplified body structure is the same
as the body structure, except that extension fields will never be
present.
"""
d = self._fetch(messages, useUID=uid, body=1)
d.addCallback(self.__cbFetch)
return d
def fetchMessage(self, messages, uid=0):
"""Retrieve one or more entire messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message objects (as returned by self.messageFile(), file objects by
default), to additional information, or whose errback is invoked if
there is an error.
"""
d = self._fetch(messages, useUID=uid, rfc822=1)
d.addCallback(self.__cbFetch)
return d
def fetchHeaders(self, messages, uid=0):
"""Retrieve headers of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to dicts of message headers, or whose errback is
invoked if there is an error.
"""
d = self._fetch(messages, useUID=uid, rfc822header=1)
d.addCallback(self.__cbFetch)
return d
def fetchBody(self, messages, uid=0):
"""Retrieve body text of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to file-like objects containing body text, or whose
errback is invoked if there is an error.
"""
d = self._fetch(messages, useUID=uid, rfc822text=1)
d.addCallback(self.__cbFetch)
return d
def fetchSize(self, messages, uid=0):
"""Retrieve the size, in octets, of one or more messages
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
message numbers to sizes, or whose errback is invoked if there is
an error.
"""
d = self._fetch(messages, useUID=uid, rfc822size=1)
d.addCallback(self.__cbFetch)
return d
def fetchFull(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate},
C{fetchSize}, C{fetchEnvelope}, and C{fetchSimplifiedBody}
functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
        message numbers to dicts of the retrieved data values, or whose
        errback is invoked if there is an error. The dictionary keys
are "flags", "date", "size", "envelope", and "body".
"""
d = self._fetch(
messages, useUID=uid, flags=1, internaldate=1,
rfc822size=1, envelope=1, body=1
)
d.addCallback(self.__cbFetch)
return d
def fetchAll(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate},
C{fetchSize}, and C{fetchEnvelope} functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
        message numbers to dicts of the retrieved data values, or whose
        errback is invoked if there is an error. The dictionary keys
are "flags", "date", "size", and "envelope".
"""
d = self._fetch(
messages, useUID=uid, flags=1, internaldate=1,
rfc822size=1, envelope=1
)
d.addCallback(self.__cbFetch)
return d
def fetchFast(self, messages, uid=0):
"""Retrieve several different fields of one or more messages
This command is allowed in the Selected state. This is equivalent
to issuing all of the C{fetchFlags}, C{fetchInternalDate}, and
C{fetchSize} functions.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a dict mapping
        message numbers to dicts of the retrieved data values, or whose
        errback is invoked if there is an error. The dictionary keys are
"flags", "date", and "size".
"""
d = self._fetch(
messages, useUID=uid, flags=1, internaldate=1, rfc822size=1
)
d.addCallback(self.__cbFetch)
return d
def __cbFetch(self, (lines, last)):
flags = {}
for line in lines:
parts = line.split(None, 2)
if len(parts) == 3:
if parts[1] == 'FETCH':
try:
id = int(parts[0])
except ValueError:
raise IllegalServerResponse, line
else:
data = parseNestedParens(parts[2])
while len(data) == 1 and isinstance(data, types.ListType):
data = data[0]
while data:
if len(data) < 2:
raise IllegalServerResponse("Not enough arguments", data)
flags.setdefault(id, {})[data[0]] = data[1]
del data[:2]
                else:
                    log.msg('(2)Ignoring ' + repr(parts))
            else:
                log.msg('(3)Ignoring ' + repr(parts))
return flags
def fetchSpecific(self, messages, uid=0, headerType=None,
headerNumber=None, headerArgs=None, peek=None,
offset=None, length=None):
"""Retrieve a specific section of one or more messages
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@type headerType: C{str}
@param headerType: If specified, must be one of HEADER,
HEADER.FIELDS, HEADER.FIELDS.NOT, MIME, or TEXT, and will determine
which part of the message is retrieved. For HEADER.FIELDS and
HEADER.FIELDS.NOT, C{headerArgs} must be a sequence of header names.
For MIME, C{headerNumber} must be specified.
@type headerNumber: C{int} or C{int} sequence
@param headerNumber: The nested rfc822 index specifying the
entity to retrieve. For example, C{1} retrieves the first
        entity of the message, and C{(2, 1, 3)} retrieves the 3rd
entity inside the first entity inside the second entity of
the message.
@type headerArgs: A sequence of C{str}
@param headerArgs: If C{headerType} is HEADER.FIELDS, these are the
headers to retrieve. If it is HEADER.FIELDS.NOT, these are the
headers to exclude from retrieval.
@type peek: C{bool}
@param peek: If true, cause the server to not set the \\Seen
flag on this message as a result of this command.
@type offset: C{int}
@param offset: The number of octets at the beginning of the result
to skip.
@type length: C{int}
@param length: The number of octets to retrieve.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a mapping of
message numbers to retrieved data, or whose errback is invoked
if there is an error.
"""
fmt = '%s BODY%s[%s%s%s]%s'
if headerNumber is None:
number = ''
elif isinstance(headerNumber, types.IntType):
number = str(headerNumber)
else:
            number = '.'.join(map(str, headerNumber))
if headerType is None:
header = ''
elif number:
header = '.' + headerType
else:
header = headerType
if header:
if headerArgs is not None:
payload = ' (%s)' % ' '.join(headerArgs)
else:
payload = ' ()'
else:
payload = ''
if offset is None:
extra = ''
else:
extra = '<%d.%d>' % (offset, length)
fetch = uid and 'UID FETCH' or 'FETCH'
cmd = fmt % (messages, peek and '.PEEK' or '', number, header, payload, extra)
d = self.sendCommand(Command(fetch, cmd, wantResponse=('FETCH',)))
d.addCallback(self.__cbFetchSpecific)
return d
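    # Example (illustrative): fetchSpecific('1:5', headerType='HEADER.FIELDS',
    # headerArgs=['SUBJECT'], peek=1) builds the command arguments
    # "1:5 BODY.PEEK[HEADER.FIELDS (SUBJECT)]".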
def __cbFetchSpecific(self, (lines, last)):
info = {}
for line in lines:
parts = line.split(None, 2)
if len(parts) == 3:
if parts[1] == 'FETCH':
try:
id = int(parts[0])
except ValueError:
raise IllegalServerResponse, line
else:
info[id] = parseNestedParens(parts[2])
return info
def _fetch(self, messages, useUID=0, **terms):
fetch = useUID and 'UID FETCH' or 'FETCH'
if 'rfc822text' in terms:
del terms['rfc822text']
terms['rfc822.text'] = True
if 'rfc822size' in terms:
del terms['rfc822size']
terms['rfc822.size'] = True
if 'rfc822header' in terms:
del terms['rfc822header']
terms['rfc822.header'] = True
cmd = '%s (%s)' % (messages, ' '.join([s.upper() for s in terms.keys()]))
d = self.sendCommand(Command(fetch, cmd, wantResponse=('FETCH',)))
return d
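    # Example (illustrative): _fetch('1:3', useUID=1, flags=1, rfc822size=1)
    # sends a command like "UID FETCH 1:3 (FLAGS RFC822.SIZE)"; the exact
    # term order follows dictionary iteration order.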
def setFlags(self, messages, flags, silent=1, uid=0):
"""Set the flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type flags: Any iterable of C{str}
@param flags: The flags to set
@type silent: C{bool}
        @param silent: If true, cause the server to suppress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
        @return: A deferred whose callback is invoked with a list of the
            server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(str(messages), silent and 'FLAGS.SILENT' or 'FLAGS', flags, uid)
def addFlags(self, messages, flags, silent=1, uid=0):
"""Add to the set flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type flags: Any iterable of C{str}
@param flags: The flags to set
@type silent: C{bool}
        @param silent: If true, cause the server to suppress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
        @return: A deferred whose callback is invoked with a list of the
            server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(str(messages), silent and '+FLAGS.SILENT' or '+FLAGS', flags, uid)
def removeFlags(self, messages, flags, silent=1, uid=0):
"""Remove from the set flags for one or more messages.
This command is allowed in the Selected state.
@type messages: C{MessageSet} or C{str}
@param messages: A message sequence set
@type flags: Any iterable of C{str}
@param flags: The flags to set
@type silent: C{bool}
        @param silent: If true, cause the server to suppress its verbose
response.
@type uid: C{bool}
@param uid: Indicates whether the message sequence set is of message
numbers or of unique message IDs.
@rtype: C{Deferred}
        @return: A deferred whose callback is invoked with a list of the
            server's responses (C{[]} if C{silent} is true) or whose
errback is invoked if there is an error.
"""
return self._store(str(messages), silent and '-FLAGS.SILENT' or '-FLAGS', flags, uid)
def _store(self, messages, cmd, flags, uid):
store = uid and 'UID STORE' or 'STORE'
args = ' '.join((messages, cmd, '(%s)' % ' '.join(flags)))
d = self.sendCommand(Command(store, args, wantResponse=('FETCH',)))
d.addCallback(self.__cbFetch)
return d
def copy(self, messages, mailbox, uid):
"""Copy the specified messages to the specified mailbox.
This command is allowed in the Selected state.
@type messages: C{str}
@param messages: A message sequence set
@type mailbox: C{str}
@param mailbox: The mailbox to which to copy the messages
@type uid: C{bool}
@param uid: If true, the C{messages} refers to message UIDs, rather
than message sequence numbers.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with a true value
when the copy is successful, or whose errback is invoked if there
is an error.
"""
if uid:
cmd = 'UID COPY'
else:
cmd = 'COPY'
args = '%s %s' % (messages, _prepareMailboxName(mailbox))
return self.sendCommand(Command(cmd, args))
#
# IMailboxListener methods
#
def modeChanged(self, writeable):
"""Override me"""
def flagsChanged(self, newFlags):
"""Override me"""
def newMessages(self, exists, recent):
"""Override me"""
class IllegalIdentifierError(IMAP4Exception): pass
def parseIdList(s):
res = MessageSet()
parts = s.split(',')
for p in parts:
if ':' in p:
low, high = p.split(':', 1)
try:
if low == '*':
low = None
else:
low = long(low)
if high == '*':
high = None
else:
high = long(high)
res.extend((low, high))
except ValueError:
raise IllegalIdentifierError(p)
else:
try:
if p == '*':
p = None
else:
p = long(p)
except ValueError:
raise IllegalIdentifierError(p)
else:
res.extend(p)
return res
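# Example (illustrative): parseIdList('1:5,7,9:*') returns a MessageSet
# covering 1 through 5, 7, and 9 through the last message; '*' is parsed
# as None, which MessageSet interprets as the largest available number.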
class IllegalQueryError(IMAP4Exception): pass
_SIMPLE_BOOL = (
'ALL', 'ANSWERED', 'DELETED', 'DRAFT', 'FLAGGED', 'NEW', 'OLD', 'RECENT',
'SEEN', 'UNANSWERED', 'UNDELETED', 'UNDRAFT', 'UNFLAGGED', 'UNSEEN'
)
_NO_QUOTES = (
'LARGER', 'SMALLER', 'UID'
)
def Query(sorted=0, **kwarg):
"""Create a query string
Among the accepted keywords are::
all : If set to a true value, search all messages in the
current mailbox
answered : If set to a true value, search messages flagged with
\\Answered
bcc : A substring to search the BCC header field for
before : Search messages with an internal date before this
value. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
body : A substring to search the body of the messages for
cc : A substring to search the CC header field for
deleted : If set to a true value, search messages flagged with
\\Deleted
draft : If set to a true value, search messages flagged with
\\Draft
flagged : If set to a true value, search messages flagged with
\\Flagged
from : A substring to search the From header field for
header : A two-tuple of a header name and substring to search
for in that header
keyword : Search for messages with the given keyword set
larger : Search for messages larger than this number of octets
messages : Search only the given message sequence set.
new : If set to a true value, search messages flagged with
\\Recent but not \\Seen
old : If set to a true value, search messages not flagged with
\\Recent
on : Search messages with an internal date which is on this
date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
recent : If set to a true value, search for messages flagged with
\\Recent
seen : If set to a true value, search for messages flagged with
\\Seen
sentbefore : Search for messages with an RFC822 'Date' header before
this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
senton : Search for messages with an RFC822 'Date' header which is
                 on this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
sentsince : Search for messages with an RFC822 'Date' header which is
after this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
since : Search for messages with an internal date that is after
                 this date. The given date should be a string in the format
of 'DD-Mon-YYYY'. For example, '03-Mar-2003'.
smaller : Search for messages smaller than this number of octets
subject : A substring to search the 'subject' header for
text : A substring to search the entire message for
to : A substring to search the 'to' header for
uid : Search only the messages in the given message set
unanswered : If set to a true value, search for messages not
flagged with \\Answered
undeleted : If set to a true value, search for messages not
flagged with \\Deleted
undraft : If set to a true value, search for messages not
flagged with \\Draft
unflagged : If set to a true value, search for messages not
flagged with \\Flagged
unkeyword : Search for messages without the given keyword set
unseen : If set to a true value, search for messages not
flagged with \\Seen
@type sorted: C{bool}
    @param sorted: If true, the output will be sorted alphabetically.
The standard does not require it, but it makes testing this function
easier. The default is zero, and this should be acceptable for any
application.
@rtype: C{str}
@return: The formatted query string
"""
cmd = []
keys = kwarg.keys()
if sorted:
keys.sort()
for k in keys:
v = kwarg[k]
k = k.upper()
if k in _SIMPLE_BOOL and v:
cmd.append(k)
elif k == 'HEADER':
cmd.extend([k, v[0], '"%s"' % (v[1],)])
elif k not in _NO_QUOTES:
cmd.extend([k, '"%s"' % (v,)])
else:
cmd.extend([k, '%s' % (v,)])
if len(cmd) > 1:
return '(%s)' % ' '.join(cmd)
else:
return ' '.join(cmd)
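# Example (illustrative):
#   Query(sorted=1, unseen=1, subject='urgent', larger=1000)
#   => '(LARGER 1000 SUBJECT "urgent" UNSEEN)'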
def Or(*args):
"""The disjunction of two or more queries"""
if len(args) < 2:
raise IllegalQueryError, args
elif len(args) == 2:
return '(OR %s %s)' % args
else:
return '(OR %s %s)' % (args[0], Or(*args[1:]))
def Not(query):
"""The negation of a query"""
return '(NOT %s)' % (query,)
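# Examples (illustrative):
#   Or(Query(seen=1), Query(deleted=1))  => '(OR SEEN DELETED)'
#   Not(Query(seen=1))                   => '(NOT SEEN)'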
class MismatchedNesting(IMAP4Exception):
pass
class MismatchedQuoting(IMAP4Exception):
pass
def wildcardToRegexp(wildcard, delim=None):
wildcard = wildcard.replace('*', '(?:.*?)')
if delim is None:
wildcard = wildcard.replace('%', '(?:.*?)')
else:
wildcard = wildcard.replace('%', '(?:(?:[^%s])*?)' % re.escape(delim))
return re.compile(wildcard, re.I)
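# Example (illustrative): wildcardToRegexp('Lists/%', '/') compiles a
# case-insensitive pattern in which '%' matches any run of characters
# other than the '/' delimiter, while '*' would also match across it.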
def splitQuoted(s):
"""Split a string into whitespace delimited tokens
Tokens that would otherwise be separated but are surrounded by \"
remain as a single token. Any token that is not quoted and is
equal to \"NIL\" is tokenized as C{None}.
@type s: C{str}
@param s: The string to be split
@rtype: C{list} of C{str}
@return: A list of the resulting tokens
@raise MismatchedQuoting: Raised if an odd number of quotes are present
"""
s = s.strip()
result = []
inQuote = inWord = start = 0
for (i, c) in zip(range(len(s)), s):
if c == '"' and not inQuote:
inQuote = 1
start = i + 1
elif c == '"' and inQuote:
inQuote = 0
result.append(s[start:i])
start = i + 1
elif not inWord and not inQuote and c not in ('"' + string.whitespace):
inWord = 1
start = i
elif inWord and not inQuote and c in string.whitespace:
if s[start:i] == 'NIL':
result.append(None)
else:
result.append(s[start:i])
start = i
inWord = 0
if inQuote:
raise MismatchedQuoting(s)
if inWord:
if s[start:] == 'NIL':
result.append(None)
else:
result.append(s[start:])
return result
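# Example (illustrative):
#   splitQuoted('hello "brave world" NIL') => ['hello', 'brave world', None]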
def splitOn(sequence, predicate, transformers):
result = []
mode = predicate(sequence[0])
tmp = [sequence[0]]
for e in sequence[1:]:
p = predicate(e)
if p != mode:
result.extend(transformers[mode](tmp))
tmp = [e]
mode = p
else:
tmp.append(e)
result.extend(transformers[mode](tmp))
return result
def collapseStrings(results):
"""
Turns a list of length-one strings and lists into a list of longer
strings and lists. For example,
['a', 'b', ['c', 'd']] is returned as ['ab', ['cd']]
@type results: C{list} of C{str} and C{list}
@param results: The list to be collapsed
@rtype: C{list} of C{str} and C{list}
@return: A new list which is the collapsed form of C{results}
"""
copy = []
begun = None
listsList = [isinstance(s, types.ListType) for s in results]
pred = lambda e: isinstance(e, types.TupleType)
tran = {
0: lambda e: splitQuoted(''.join(e)),
1: lambda e: [''.join([i[0] for i in e])]
}
for (i, c, isList) in zip(range(len(results)), results, listsList):
if isList:
if begun is not None:
copy.extend(splitOn(results[begun:i], pred, tran))
begun = None
copy.append(collapseStrings(c))
elif begun is None:
begun = i
if begun is not None:
copy.extend(splitOn(results[begun:], pred, tran))
return copy
def parseNestedParens(s, handleLiteral = 1):
"""Parse an s-exp-like string into a more useful data structure.
@type s: C{str}
@param s: The s-exp-like string to parse
@rtype: C{list} of C{str} and C{list}
@return: A list containing the tokens present in the input.
@raise MismatchedNesting: Raised if the number or placement
of opening or closing parenthesis is invalid.
"""
s = s.strip()
inQuote = 0
contentStack = [[]]
try:
i = 0
L = len(s)
while i < L:
c = s[i]
if inQuote:
if c == '\\':
contentStack[-1].append(s[i+1])
i += 2
continue
elif c == '"':
inQuote = not inQuote
contentStack[-1].append(c)
i += 1
else:
if c == '"':
contentStack[-1].append(c)
inQuote = not inQuote
i += 1
elif handleLiteral and c == '{':
end = s.find('}', i)
if end == -1:
raise ValueError, "Malformed literal"
literalSize = int(s[i+1:end])
contentStack[-1].append((s[end+3:end+3+literalSize],))
i = end + 3 + literalSize
elif c == '(' or c == '[':
contentStack.append([])
i += 1
elif c == ')' or c == ']':
contentStack[-2].append(contentStack.pop())
i += 1
else:
contentStack[-1].append(c)
i += 1
except IndexError:
raise MismatchedNesting(s)
if len(contentStack) != 1:
raise MismatchedNesting(s)
return collapseStrings(contentStack[0])
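# Example (illustrative):
#   parseNestedParens('(ENVELOPE ("date" NIL))')
#   => [['ENVELOPE', ['date', None]]]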
def _quote(s):
return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\\"'),)
def _literal(s):
return '{%d}\r\n%s' % (len(s), s)
class DontQuoteMe:
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
_ATOM_SPECIALS = '(){ %*"'
def _needsQuote(s):
if s == '':
return 1
for c in s:
if c < '\x20' or c > '\x7f':
return 1
if c in _ATOM_SPECIALS:
return 1
return 0
def _prepareMailboxName(name):
name = name.encode('imap4-utf-7')
if _needsQuote(name):
return _quote(name)
return name
def _needsLiteral(s):
# Change this to "return 1" to wig out stupid clients
return '\n' in s or '\r' in s or len(s) > 1000
def collapseNestedLists(items):
"""Turn a nested list structure into an s-exp-like string.
Strings in C{items} will be sent as literals if they contain CR or LF,
otherwise they will be quoted. References to None in C{items} will be
translated to the atom NIL. Objects with a 'read' attribute will have
it called on them with no arguments and the returned string will be
inserted into the output as a literal. Integers will be converted to
strings and inserted into the output unquoted. Instances of
C{DontQuoteMe} will be converted to strings and inserted into the output
unquoted.
This function used to be much nicer, and only quote things that really
needed to be quoted (and C{DontQuoteMe} did not exist), however, many
broken IMAP4 clients were unable to deal with this level of sophistication,
forcing the current behavior to be adopted for practical reasons.
@type items: Any iterable
@rtype: C{str}
"""
pieces = []
for i in items:
if i is None:
pieces.extend([' ', 'NIL'])
elif isinstance(i, (DontQuoteMe, int, long)):
pieces.extend([' ', str(i)])
elif isinstance(i, types.StringTypes):
if _needsLiteral(i):
pieces.extend([' ', '{', str(len(i)), '}', IMAP4Server.delimiter, i])
else:
pieces.extend([' ', _quote(i)])
elif hasattr(i, 'read'):
d = i.read()
pieces.extend([' ', '{', str(len(d)), '}', IMAP4Server.delimiter, d])
else:
pieces.extend([' ', '(%s)' % (collapseNestedLists(i),)])
return ''.join(pieces[1:])
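# Example (illustrative):
#   collapseNestedLists([None, 1, 'two words', ['nested']])
#   => 'NIL 1 "two words" ("nested")'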
class IClientAuthentication(Interface):
def getName():
"""Return an identifier associated with this authentication scheme.
@rtype: C{str}
"""
def challengeResponse(secret, challenge):
"""Generate a challenge response string"""
class CramMD5ClientAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
def getName(self):
return "CRAM-MD5"
def challengeResponse(self, secret, chal):
response = hmac.HMAC(secret, chal).hexdigest()
return '%s %s' % (self.user, response)
class LOGINAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
self.challengeResponse = self.challengeUsername
def getName(self):
return "LOGIN"
def challengeUsername(self, secret, chal):
# Respond to something like "Username:"
self.challengeResponse = self.challengeSecret
return self.user
def challengeSecret(self, secret, chal):
# Respond to something like "Password:"
return secret
class PLAINAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
def getName(self):
return "PLAIN"
def challengeResponse(self, secret, chal):
return '%s\0%s\0' % (self.user, secret)
class MailboxException(IMAP4Exception): pass
class MailboxCollision(MailboxException):
def __str__(self):
return 'Mailbox named %s already exists' % self.args
class NoSuchMailbox(MailboxException):
def __str__(self):
return 'No mailbox named %s exists' % self.args
class ReadOnlyMailbox(MailboxException):
def __str__(self):
return 'Mailbox open in read-only state'
class IAccount(Interface):
"""Interface for Account classes
Implementors of this interface should consider implementing
C{INamespacePresenter}.
"""
def addMailbox(name, mbox = None):
"""Add a new mailbox to this account
@type name: C{str}
@param name: The name associated with this mailbox. It may not
contain multiple hierarchical parts.
@type mbox: An object implementing C{IMailbox}
@param mbox: The mailbox to associate with this name. If C{None},
a suitable default is created and used.
@rtype: C{Deferred} or C{bool}
@return: A true value if the creation succeeds, or a deferred whose
callback will be invoked when the creation succeeds.
@raise MailboxException: Raised if this mailbox cannot be added for
some reason. This may also be raised asynchronously, if a C{Deferred}
is returned.
"""
def create(pathspec):
"""Create a new mailbox from the given hierarchical name.
@type pathspec: C{str}
@param pathspec: The full hierarchical name of a new mailbox to create.
If any of the inferior hierarchical names to this one do not exist,
they are created as well.
@rtype: C{Deferred} or C{bool}
@return: A true value if the creation succeeds, or a deferred whose
callback will be invoked when the creation succeeds.
@raise MailboxException: Raised if this mailbox cannot be added.
This may also be raised asynchronously, if a C{Deferred} is
returned.
"""
def select(name, rw=True):
"""Acquire a mailbox, given its name.
@type name: C{str}
@param name: The mailbox to acquire
@type rw: C{bool}
@param rw: If a true value, request a read-write version of this
mailbox. If a false value, request a read-only version.
@rtype: Any object implementing C{IMailbox} or C{Deferred}
@return: The mailbox object, or a C{Deferred} whose callback will
be invoked with the mailbox object. None may be returned if the
specified mailbox may not be selected for any reason.
"""
def delete(name):
"""Delete the mailbox with the specified name.
@type name: C{str}
@param name: The mailbox to delete.
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is successfully deleted, or a
C{Deferred} whose callback will be invoked when the deletion
completes.
@raise MailboxException: Raised if this mailbox cannot be deleted.
This may also be raised asynchronously, if a C{Deferred} is returned.
"""
def rename(oldname, newname):
"""Rename a mailbox
@type oldname: C{str}
@param oldname: The current name of the mailbox to rename.
@type newname: C{str}
@param newname: The new name to associate with the mailbox.
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is successfully renamed, or a
C{Deferred} whose callback will be invoked when the rename operation
is completed.
@raise MailboxException: Raised if this mailbox cannot be
renamed. This may also be raised asynchronously, if a C{Deferred}
is returned.
"""
def isSubscribed(name):
"""Check the subscription status of a mailbox
@type name: C{str}
@param name: The name of the mailbox to check
@rtype: C{Deferred} or C{bool}
@return: A true value if the given mailbox is currently subscribed
to, a false value otherwise. A C{Deferred} may also be returned
whose callback will be invoked with one of these values.
"""
def subscribe(name):
"""Subscribe to a mailbox
@type name: C{str}
@param name: The name of the mailbox to subscribe to
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is subscribed to successfully,
or a Deferred whose callback will be invoked with this value when
the subscription is successful.
@raise MailboxException: Raised if this mailbox cannot be
subscribed to. This may also be raised asynchronously, if a
C{Deferred} is returned.
"""
def unsubscribe(name):
"""Unsubscribe from a mailbox
@type name: C{str}
@param name: The name of the mailbox to unsubscribe from
@rtype: C{Deferred} or C{bool}
@return: A true value if the mailbox is unsubscribed from successfully,
or a Deferred whose callback will be invoked with this value when
the unsubscription is successful.
@raise MailboxException: Raised if this mailbox cannot be
unsubscribed from. This may also be raised asynchronously, if a
C{Deferred} is returned.
"""
def listMailboxes(ref, wildcard):
"""List all the mailboxes that meet a certain criteria
@type ref: C{str}
@param ref: The context in which to apply the wildcard
@type wildcard: C{str}
@param wildcard: An expression against which to match mailbox names.
'*' matches any number of characters in a mailbox name, and '%'
matches similarly, but will not match across hierarchical boundaries.
@rtype: C{list} of C{tuple}
@return: A list of C{(mailboxName, mailboxObject)} which meet the
given criteria. C{mailboxObject} should implement either
C{IMailboxInfo} or C{IMailbox}. A Deferred may also be returned.
"""
class INamespacePresenter(Interface):
def getPersonalNamespaces():
"""Report the available personal namespaces.
Typically there should be only one personal namespace. A common
name for it is \"\", and its hierarchical delimiter is usually
\"/\".
@rtype: iterable of two-tuples of strings
@return: The personal namespaces and their hierarchical delimiters.
If no namespaces of this type exist, None should be returned.
"""
def getSharedNamespaces():
"""Report the available shared namespaces.
Shared namespaces do not belong to any individual user but are
        usually available to one or more of them. Examples of shared namespaces
might be \"#news\" for a usenet gateway.
@rtype: iterable of two-tuples of strings
@return: The shared namespaces and their hierarchical delimiters.
If no namespaces of this type exist, None should be returned.
"""
def getUserNamespaces():
"""Report the available user namespaces.
        These are namespaces that contain folders belonging to other users,
        access to which has been granted to this account.
@rtype: iterable of two-tuples of strings
@return: The user namespaces and their hierarchical delimiters.
If no namespaces of this type exist, None should be returned.
"""
class MemoryAccount(object):
implements(IAccount, INamespacePresenter)
mailboxes = None
subscriptions = None
top_id = 0
def __init__(self, name):
self.name = name
self.mailboxes = {}
self.subscriptions = []
def allocateID(self):
id = self.top_id
self.top_id += 1
return id
##
## IAccount
##
def addMailbox(self, name, mbox = None):
name = name.upper()
if self.mailboxes.has_key(name):
raise MailboxCollision, name
if mbox is None:
mbox = self._emptyMailbox(name, self.allocateID())
self.mailboxes[name] = mbox
return 1
def create(self, pathspec):
paths = filter(None, pathspec.split('/'))
for accum in range(1, len(paths)):
try:
self.addMailbox('/'.join(paths[:accum]))
except MailboxCollision:
pass
try:
self.addMailbox('/'.join(paths))
except MailboxCollision:
if not pathspec.endswith('/'):
return False
return True
def _emptyMailbox(self, name, id):
raise NotImplementedError
def select(self, name, readwrite=1):
return self.mailboxes.get(name.upper())
def delete(self, name):
name = name.upper()
# See if this mailbox exists at all
mbox = self.mailboxes.get(name)
if not mbox:
raise MailboxException("No such mailbox")
# See if this box is flagged \Noselect
if r'\Noselect' in mbox.getFlags():
# Check for hierarchically inferior mailboxes with this one
# as part of their root.
for others in self.mailboxes.keys():
if others != name and others.startswith(name):
raise MailboxException, "Hierarchically inferior mailboxes exist and \\Noselect is set"
mbox.destroy()
# iff there are no hierarchically inferior names, we will
# delete it from our ken.
        if len(self._inferiorNames(name)) == 1:
del self.mailboxes[name]
def rename(self, oldname, newname):
oldname = oldname.upper()
newname = newname.upper()
if not self.mailboxes.has_key(oldname):
raise NoSuchMailbox, oldname
inferiors = self._inferiorNames(oldname)
inferiors = [(o, o.replace(oldname, newname, 1)) for o in inferiors]
for (old, new) in inferiors:
if self.mailboxes.has_key(new):
raise MailboxCollision, new
for (old, new) in inferiors:
self.mailboxes[new] = self.mailboxes[old]
del self.mailboxes[old]
def _inferiorNames(self, name):
inferiors = []
for infname in self.mailboxes.keys():
if infname.startswith(name):
inferiors.append(infname)
return inferiors
def isSubscribed(self, name):
return name.upper() in self.subscriptions
def subscribe(self, name):
name = name.upper()
if name not in self.subscriptions:
self.subscriptions.append(name)
def unsubscribe(self, name):
name = name.upper()
if name not in self.subscriptions:
raise MailboxException, "Not currently subscribed to " + name
self.subscriptions.remove(name)
def listMailboxes(self, ref, wildcard):
ref = self._inferiorNames(ref.upper())
wildcard = wildcardToRegexp(wildcard, '/')
return [(i, self.mailboxes[i]) for i in ref if wildcard.match(i)]
##
## INamespacePresenter
##
def getPersonalNamespaces(self):
return [["", "/"]]
def getSharedNamespaces(self):
return None
    def getUserNamespaces(self):
return None
_statusRequestDict = {
'MESSAGES': 'getMessageCount',
'RECENT': 'getRecentCount',
'UIDNEXT': 'getUIDNext',
'UIDVALIDITY': 'getUIDValidity',
'UNSEEN': 'getUnseenCount'
}
def statusRequestHelper(mbox, names):
r = {}
for n in names:
r[n] = getattr(mbox, _statusRequestDict[n.upper()])()
return r
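# Example (illustrative): statusRequestHelper(mbox, ['MESSAGES', 'UNSEEN'])
# returns {'MESSAGES': mbox.getMessageCount(), 'UNSEEN': mbox.getUnseenCount()}.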
def parseAddr(addr):
if addr is None:
return [(None, None, None),]
addrs = email.Utils.getaddresses([addr])
return [[fn or None, None] + addr.split('@') for fn, addr in addrs]
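# Example (illustrative):
#   parseAddr('Alice <alice@example.com>')
#   => [['Alice', None, 'alice', 'example.com']]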
def getEnvelope(msg):
headers = msg.getHeaders(True)
date = headers.get('date')
subject = headers.get('subject')
from_ = headers.get('from')
sender = headers.get('sender', from_)
reply_to = headers.get('reply-to', from_)
to = headers.get('to')
cc = headers.get('cc')
bcc = headers.get('bcc')
in_reply_to = headers.get('in-reply-to')
mid = headers.get('message-id')
return (date, subject, parseAddr(from_), parseAddr(sender),
reply_to and parseAddr(reply_to), to and parseAddr(to),
cc and parseAddr(cc), bcc and parseAddr(bcc), in_reply_to, mid)
def getLineCount(msg):
# XXX - Super expensive, CACHE THIS VALUE FOR LATER RE-USE
# XXX - This must be the number of lines in the ENCODED version
lines = 0
for _ in msg.getBodyFile():
lines += 1
return lines
def unquote(s):
if s[0] == s[-1] == '"':
return s[1:-1]
return s
def getBodyStructure(msg, extended=False):
# XXX - This does not properly handle multipart messages
# BODYSTRUCTURE is obscenely complex and criminally under-documented.
attrs = {}
headers = 'content-type', 'content-id', 'content-description', 'content-transfer-encoding'
headers = msg.getHeaders(False, *headers)
mm = headers.get('content-type')
if mm:
mm = ''.join(mm.splitlines())
mimetype = mm.split(';')
if mimetype:
type = mimetype[0].split('/', 1)
if len(type) == 1:
major = type[0]
minor = None
elif len(type) == 2:
major, minor = type
else:
major = minor = None
attrs = dict([x.strip().lower().split('=', 1) for x in mimetype[1:]])
else:
major = minor = None
else:
major = minor = None
size = str(msg.getSize())
unquotedAttrs = [(k, unquote(v)) for (k, v) in attrs.iteritems()]
result = [
major, minor, # Main and Sub MIME types
unquotedAttrs, # content-type parameter list
headers.get('content-id'),
headers.get('content-description'),
headers.get('content-transfer-encoding'),
size, # Number of octets total
]
if major is not None:
if major.lower() == 'text':
result.append(str(getLineCount(msg)))
elif (major.lower(), minor.lower()) == ('message', 'rfc822'):
contained = msg.getSubPart(0)
result.append(getEnvelope(contained))
result.append(getBodyStructure(contained, False))
result.append(str(getLineCount(contained)))
if not extended or major is None:
return result
if major.lower() != 'multipart':
headers = 'content-md5', 'content-disposition', 'content-language'
headers = msg.getHeaders(False, *headers)
disp = headers.get('content-disposition')
# XXX - I dunno if this is really right
if disp:
disp = disp.split('; ')
if len(disp) == 1:
disp = (disp[0].lower(), None)
elif len(disp) > 1:
disp = (disp[0].lower(), [x.split('=') for x in disp[1:]])
result.append(headers.get('content-md5'))
result.append(disp)
result.append(headers.get('content-language'))
else:
result = [result]
try:
i = 0
while True:
submsg = msg.getSubPart(i)
result.append(getBodyStructure(submsg))
i += 1
except IndexError:
result.append(minor)
result.append(attrs.items())
# XXX - I dunno if this is really right
headers = msg.getHeaders(False, 'content-disposition', 'content-language')
disp = headers.get('content-disposition')
if disp:
disp = disp.split('; ')
if len(disp) == 1:
disp = (disp[0].lower(), None)
elif len(disp) > 1:
disp = (disp[0].lower(), [x.split('=') for x in disp[1:]])
result.append(disp)
result.append(headers.get('content-language'))
return result
class IMessagePart(Interface):
def getHeaders(negate, *names):
"""Retrieve a group of message headers.
@type names: C{tuple} of C{str}
@param names: The names of the headers to retrieve or omit.
@type negate: C{bool}
@param negate: If True, indicates that the headers listed in C{names}
should be omitted from the return value, rather than included.
@rtype: C{dict}
@return: A mapping of header field names to header field values
"""
def getBodyFile():
"""Retrieve a file object containing only the body of this message.
"""
def getSize():
"""Retrieve the total size, in octets, of this message.
@rtype: C{int}
"""
def isMultipart():
"""Indicate whether this message has subparts.
@rtype: C{bool}
"""
def getSubPart(part):
"""Retrieve a MIME sub-message
@type part: C{int}
@param part: The number of the part to retrieve, indexed from 0.
@raise IndexError: Raised if the specified part does not exist.
@raise TypeError: Raised if this message is not multipart.
@rtype: Any object implementing C{IMessagePart}.
@return: The specified sub-part.
"""
class IMessage(IMessagePart):
def getUID():
"""Retrieve the unique identifier associated with this message.
"""
def getFlags():
"""Retrieve the flags associated with this message.
@rtype: C{iterable}
@return: The flags, represented as strings.
"""
def getInternalDate():
"""Retrieve the date internally associated with this message.
@rtype: C{str}
@return: An RFC822-formatted date string.
"""
class IMessageFile(Interface):
"""Optional message interface for representing messages as files.
    If provided by message objects, this interface will be used instead of
    the more complex MIME-based interface.
"""
def open():
"""Return an file-like object opened for reading.
Reading from the returned file will return all the bytes
of which this message consists.
"""
class ISearchableMailbox(Interface):
def search(query, uid):
"""Search for messages that meet the given query criteria.
If this interface is not implemented by the mailbox, L{IMailbox.fetch}
and various methods of L{IMessage} will be used instead.
Implementations which wish to offer better performance than the
default implementation should implement this interface.
@type query: C{list}
@param query: The search criteria
@type uid: C{bool}
@param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
@rtype: C{list} or C{Deferred}
@return: A list of message sequence numbers or message UIDs which
match the search criteria or a C{Deferred} whose callback will be
invoked with such a list.
"""
class IMessageCopier(Interface):
def copy(messageObject):
"""Copy the given message object into this mailbox.
The message object will be one which was previously returned by
L{IMailbox.fetch}.
Implementations which wish to offer better performance than the
default implementation should implement this interface.
If this interface is not implemented by the mailbox, IMailbox.addMessage
will be used instead.
@rtype: C{Deferred} or C{int}
@return: Either the UID of the message or a Deferred which fires
with the UID when the copy finishes.
"""
class IMailboxInfo(Interface):
"""Interface specifying only the methods required for C{listMailboxes}.
    Implementations can return objects implementing only these methods from
    C{listMailboxes} if doing so allows them to operate more
    efficiently.
"""
def getFlags():
"""Return the flags defined in this mailbox
Flags with the \\ prefix are reserved for use as system flags.
@rtype: C{list} of C{str}
@return: A list of the flags that can be set on messages in this mailbox.
"""
def getHierarchicalDelimiter():
"""Get the character which delimits namespaces for in this mailbox.
@rtype: C{str}
"""
class IMailbox(IMailboxInfo):
def getUIDValidity():
"""Return the unique validity identifier for this mailbox.
@rtype: C{int}
"""
def getUIDNext():
"""Return the likely UID for the next message added to this mailbox.
@rtype: C{int}
"""
def getUID(message):
"""Return the UID of a message in the mailbox
@type message: C{int}
@param message: The message sequence number
@rtype: C{int}
@return: The UID of the message.
"""
def getMessageCount():
"""Return the number of messages in this mailbox.
@rtype: C{int}
"""
def getRecentCount():
"""Return the number of messages with the 'Recent' flag.
@rtype: C{int}
"""
def getUnseenCount():
"""Return the number of messages with the 'Unseen' flag.
@rtype: C{int}
"""
def isWriteable():
"""Get the read/write status of the mailbox.
@rtype: C{int}
@return: A true value if write permission is allowed, a false value otherwise.
"""
def destroy():
"""Called before this mailbox is deleted, permanently.
If necessary, all resources held by this mailbox should be cleaned
up here. This function _must_ set the \\Noselect flag on this
mailbox.
"""
def requestStatus(names):
"""Return status information about this mailbox.
        For mailboxes which do not intend to do any special processing to
        generate the return value, C{statusRequestHelper} can be used
to build the dictionary by calling the other interface methods
which return the data for each name.
@type names: Any iterable
@param names: The status names to return information regarding.
The possible values for each name are: MESSAGES, RECENT, UIDNEXT,
UIDVALIDITY, UNSEEN.
@rtype: C{dict} or C{Deferred}
@return: A dictionary containing status information about the
requested names is returned. If the process of looking this
information up would be costly, a deferred whose callback will
eventually be passed this dictionary is returned instead.
"""
def addListener(listener):
"""Add a mailbox change listener
@type listener: Any object which implements C{IMailboxListener}
@param listener: An object to add to the set of those which will
be notified when the contents of this mailbox change.
"""
def removeListener(listener):
"""Remove a mailbox change listener
@type listener: Any object previously added to and not removed from
this mailbox as a listener.
@param listener: The object to remove from the set of listeners.
@raise ValueError: Raised when the given object is not a listener for
this mailbox.
"""
def addMessage(message, flags = (), date = None):
"""Add the given message to this mailbox.
@type message: A file-like object
@param message: The RFC822 formatted message
@type flags: Any iterable of C{str}
@param flags: The flags to associate with this message
@type date: C{str}
@param date: If specified, the date to associate with this
message.
@rtype: C{Deferred}
@return: A deferred whose callback is invoked with the message
id if the message is added successfully and whose errback is
invoked otherwise.
@raise ReadOnlyMailbox: Raised if this Mailbox is not open for
read-write.
"""
def expunge():
"""Remove all messages flagged \\Deleted.
@rtype: C{list} or C{Deferred}
@return: The list of message sequence numbers which were deleted,
or a C{Deferred} whose callback will be invoked with such a list.
@raise ReadOnlyMailbox: Raised if this Mailbox is not open for
read-write.
"""
def fetch(messages, uid):
"""Retrieve one or more messages.
@type messages: C{MessageSet}
@param messages: The identifiers of messages to retrieve information
about
@type uid: C{bool}
@param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
@rtype: Any iterable of two-tuples of message sequence numbers and
implementors of C{IMessage}.
"""
def store(messages, flags, mode, uid):
"""Set the flags of one or more messages.
@type messages: A MessageSet object with the list of messages requested
@param messages: The identifiers of the messages to set the flags of.
@type flags: sequence of C{str}
@param flags: The flags to set, unset, or add.
@type mode: -1, 0, or 1
@param mode: If mode is -1, these flags should be removed from the
specified messages. If mode is 1, these flags should be added to
the specified messages. If mode is 0, all existing flags should be
cleared and these flags should be added.
@type uid: C{bool}
@param uid: If true, the IDs specified in the query are UIDs;
otherwise they are message sequence IDs.
@rtype: C{dict} or C{Deferred}
@return: A C{dict} mapping message sequence numbers to sequences of C{str}
representing the flags set on the message after this operation has
been performed, or a C{Deferred} whose callback will be invoked with
such a C{dict}.
@raise ReadOnlyMailbox: Raised if this mailbox is not open for
read-write.
"""
class ICloseableMailbox(Interface):
"""A supplementary interface for mailboxes which require cleanup on close.
Implementing this interface is optional. If it is implemented, the protocol
code will call the close method defined whenever a mailbox is closed.
"""
def close():
"""Close this mailbox.
@return: A C{Deferred} which fires when this mailbox
has been closed, or None if the mailbox can be closed
immediately.
"""
def _formatHeaders(headers):
hdrs = [': '.join((k.title(), '\r\n'.join(v.splitlines()))) for (k, v)
in headers.iteritems()]
hdrs = '\r\n'.join(hdrs) + '\r\n'
return hdrs
def subparts(m):
i = 0
try:
while True:
yield m.getSubPart(i)
i += 1
except IndexError:
pass
def iterateInReactor(i):
"""Consume an interator at most a single iteration per reactor iteration.
If the iterator produces a Deferred, the next iteration will not occur
until the Deferred fires, otherwise the next iteration will be taken
in the next reactor iteration.
@rtype: C{Deferred}
@return: A deferred which fires (with None) when the iterator is
exhausted or whose errback is called if there is an exception.
"""
from twisted.internet import reactor
d = defer.Deferred()
def go(last):
try:
r = i.next()
except StopIteration:
d.callback(last)
except:
d.errback()
else:
if isinstance(r, defer.Deferred):
r.addCallback(go)
else:
reactor.callLater(0, go, r)
go(None)
return d
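# Example usage (a sketch; 'slowWork' is hypothetical and 'deferToThread'
# would come from twisted.internet.threads):
#   def steps():
#       yield None                      # next step on the next reactor turn
#       yield deferToThread(slowWork)   # next step when the Deferred fires
#   d = iterateInReactor(steps())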
class MessageProducer:
    CHUNK_SIZE = 2 ** 2 ** 2 ** 2  # right-associative: 2 ** 16 == 65536
def __init__(self, msg, buffer = None, scheduler = None):
"""Produce this message.
@param msg: The message I am to produce.
@type msg: L{IMessage}
@param buffer: A buffer to hold the message in. If None, I will
use a L{tempfile.TemporaryFile}.
@type buffer: file-like
"""
self.msg = msg
if buffer is None:
buffer = tempfile.TemporaryFile()
self.buffer = buffer
if scheduler is None:
scheduler = iterateInReactor
self.scheduler = scheduler
self.write = self.buffer.write
def beginProducing(self, consumer):
self.consumer = consumer
return self.scheduler(self._produce())
def _produce(self):
headers = self.msg.getHeaders(True)
boundary = None
if self.msg.isMultipart():
content = headers.get('content-type')
parts = [x.split('=', 1) for x in content.split(';')[1:]]
parts = dict([(k.lower().strip(), v) for (k, v) in parts])
boundary = parts.get('boundary')
if boundary is None:
# Bastards
boundary = '----=_%f_boundary_%f' % (time.time(), random.random())
headers['content-type'] += '; boundary="%s"' % (boundary,)
else:
if boundary.startswith('"') and boundary.endswith('"'):
boundary = boundary[1:-1]
self.write(_formatHeaders(headers))
self.write('\r\n')
if self.msg.isMultipart():
for p in subparts(self.msg):
self.write('\r\n--%s\r\n' % (boundary,))
yield MessageProducer(p, self.buffer, self.scheduler
).beginProducing(None
)
self.write('\r\n--%s--\r\n' % (boundary,))
else:
f = self.msg.getBodyFile()
while True:
b = f.read(self.CHUNK_SIZE)
if b:
self.buffer.write(b)
yield None
else:
break
if self.consumer:
self.buffer.seek(0, 0)
yield FileProducer(self.buffer
).beginProducing(self.consumer
).addCallback(lambda _: self
)
class _FetchParser:
class Envelope:
# Response should be a list of fields from the message:
# date, subject, from, sender, reply-to, to, cc, bcc, in-reply-to,
# and message-id.
#
# from, sender, reply-to, to, cc, and bcc are themselves lists of
# address information:
# personal name, source route, mailbox name, host name
#
# reply-to and sender must not be None. If not present in a message
# they should be defaulted to the value of the from field.
type = 'envelope'
__str__ = lambda self: 'envelope'
class Flags:
type = 'flags'
__str__ = lambda self: 'flags'
class InternalDate:
type = 'internaldate'
__str__ = lambda self: 'internaldate'
class RFC822Header:
type = 'rfc822header'
__str__ = lambda self: 'rfc822.header'
class RFC822Text:
type = 'rfc822text'
__str__ = lambda self: 'rfc822.text'
class RFC822Size:
type = 'rfc822size'
__str__ = lambda self: 'rfc822.size'
class RFC822:
type = 'rfc822'
__str__ = lambda self: 'rfc822'
class UID:
type = 'uid'
__str__ = lambda self: 'uid'
class Body:
type = 'body'
peek = False
header = None
mime = None
text = None
part = ()
empty = False
partialBegin = None
partialLength = None
def __str__(self):
base = 'BODY'
part = ''
separator = ''
if self.part:
part = '.'.join([str(x + 1) for x in self.part])
separator = '.'
# if self.peek:
# base += '.PEEK'
if self.header:
base += '[%s%s%s]' % (part, separator, self.header,)
elif self.text:
base += '[%s%sTEXT]' % (part, separator)
elif self.mime:
base += '[%s%sMIME]' % (part, separator)
elif self.empty:
base += '[%s]' % (part,)
if self.partialBegin is not None:
base += '<%d.%d>' % (self.partialBegin, self.partialLength)
return base
class BodyStructure:
type = 'bodystructure'
__str__ = lambda self: 'bodystructure'
# These three aren't top-level, they don't need type indicators
class Header:
negate = False
fields = None
part = None
def __str__(self):
base = 'HEADER'
if self.fields:
base += '.FIELDS'
if self.negate:
base += '.NOT'
fields = []
for f in self.fields:
f = f.title()
if _needsQuote(f):
f = _quote(f)
fields.append(f)
base += ' (%s)' % ' '.join(fields)
if self.part:
base = '.'.join([str(x + 1) for x in self.part]) + '.' + base
return base
class Text:
pass
class MIME:
pass
parts = None
_simple_fetch_att = [
('envelope', Envelope),
('flags', Flags),
('internaldate', InternalDate),
('rfc822.header', RFC822Header),
('rfc822.text', RFC822Text),
('rfc822.size', RFC822Size),
('rfc822', RFC822),
('uid', UID),
('bodystructure', BodyStructure),
]
def __init__(self):
self.state = ['initial']
self.result = []
self.remaining = ''
def parseString(self, s):
s = self.remaining + s
try:
while s or self.state:
# print 'Entering state_' + self.state[-1] + ' with', repr(s)
state = self.state.pop()
try:
used = getattr(self, 'state_' + state)(s)
except:
self.state.append(state)
raise
else:
# print state, 'consumed', repr(s[:used])
s = s[used:]
finally:
self.remaining = s
def state_initial(self, s):
# In the initial state, the literals "ALL", "FULL", and "FAST"
        # are accepted, as is a "(" indicating the beginning of a list of
        # fetch_att tokens, as is the beginning of a bare fetch_att token.
if s == '':
return 0
l = s.lower()
if l.startswith('all'):
self.result.extend((
self.Flags(), self.InternalDate(),
self.RFC822Size(), self.Envelope()
))
return 3
if l.startswith('full'):
self.result.extend((
self.Flags(), self.InternalDate(),
self.RFC822Size(), self.Envelope(),
self.Body()
))
return 4
if l.startswith('fast'):
self.result.extend((
self.Flags(), self.InternalDate(), self.RFC822Size(),
))
return 4
if l.startswith('('):
self.state.extend(('close_paren', 'maybe_fetch_att', 'fetch_att'))
return 1
self.state.append('fetch_att')
return 0
def state_close_paren(self, s):
if s.startswith(')'):
return 1
raise Exception("Missing )")
def state_whitespace(self, s):
# Eat up all the leading whitespace
if not s or not s[0].isspace():
raise Exception("Whitespace expected, none found")
i = 0
for i in range(len(s)):
if not s[i].isspace():
break
return i
def state_maybe_fetch_att(self, s):
if not s.startswith(')'):
self.state.extend(('maybe_fetch_att', 'fetch_att', 'whitespace'))
return 0
def state_fetch_att(self, s):
# Allowed fetch_att tokens are "ENVELOPE", "FLAGS", "INTERNALDATE",
# "RFC822", "RFC822.HEADER", "RFC822.SIZE", "RFC822.TEXT", "BODY",
# "BODYSTRUCTURE", "UID",
# "BODY [".PEEK"] [<section>] ["<" <number> "." <nz_number> ">"]
l = s.lower()
for (name, cls) in self._simple_fetch_att:
if l.startswith(name):
self.result.append(cls())
return len(name)
b = self.Body()
if l.startswith('body.peek'):
b.peek = True
used = 9
elif l.startswith('body'):
used = 4
else:
raise Exception("Nothing recognized in fetch_att: %s" % (l,))
self.pending_body = b
self.state.extend(('got_body', 'maybe_partial', 'maybe_section'))
return used
def state_got_body(self, s):
self.result.append(self.pending_body)
del self.pending_body
return 0
def state_maybe_section(self, s):
if not s.startswith("["):
return 0
self.state.extend(('section', 'part_number'))
return 1
_partExpr = re.compile(r'(\d+(?:\.\d+)*)\.?')
def state_part_number(self, s):
m = self._partExpr.match(s)
if m is not None:
self.parts = [int(p) - 1 for p in m.groups()[0].split('.')]
return m.end()
else:
self.parts = []
return 0
def state_section(self, s):
# Grab "HEADER]" or "HEADER.FIELDS (Header list)]" or
# "HEADER.FIELDS.NOT (Header list)]" or "TEXT]" or "MIME]" or
# just "]".
l = s.lower()
used = 0
if l.startswith(']'):
self.pending_body.empty = True
used += 1
elif l.startswith('header]'):
h = self.pending_body.header = self.Header()
h.negate = True
h.fields = ()
used += 7
elif l.startswith('text]'):
self.pending_body.text = self.Text()
used += 5
elif l.startswith('mime]'):
self.pending_body.mime = self.MIME()
used += 5
else:
h = self.Header()
if l.startswith('header.fields.not'):
h.negate = True
used += 17
elif l.startswith('header.fields'):
used += 13
else:
raise Exception("Unhandled section contents: %r" % (l,))
self.pending_body.header = h
self.state.extend(('finish_section', 'header_list', 'whitespace'))
self.pending_body.part = tuple(self.parts)
self.parts = None
return used
def state_finish_section(self, s):
if not s.startswith(']'):
raise Exception("section must end with ]")
return 1
def state_header_list(self, s):
if not s.startswith('('):
raise Exception("Header list must begin with (")
end = s.find(')')
if end == -1:
raise Exception("Header list must end with )")
headers = s[1:end].split()
self.pending_body.header.fields = map(str.upper, headers)
return end + 1
def state_maybe_partial(self, s):
# Grab <number.number> or nothing at all
if not s.startswith('<'):
return 0
end = s.find('>')
if end == -1:
raise Exception("Found < but not >")
partial = s[1:end]
parts = partial.split('.', 1)
if len(parts) != 2:
raise Exception("Partial specification did not include two .-delimited integers")
begin, length = map(int, parts)
self.pending_body.partialBegin = begin
self.pending_body.partialLength = length
return end + 1
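# Example (illustrative): feeding a _FetchParser the string
# 'BODY.PEEK[HEADER.FIELDS (SUBJECT)]' leaves a single Body object in
# .result whose str() form is 'BODY[HEADER.FIELDS (Subject)]'.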
class FileProducer:
    CHUNK_SIZE = 2 ** 2 ** 2 ** 2  # right-associative: 2 ** 16 == 65536
firstWrite = True
def __init__(self, f):
self.f = f
def beginProducing(self, consumer):
self.consumer = consumer
self.produce = consumer.write
d = self._onDone = defer.Deferred()
self.consumer.registerProducer(self, False)
return d
def resumeProducing(self):
b = ''
if self.firstWrite:
b = '{%d}\r\n' % self._size()
self.firstWrite = False
if not self.f:
return
b = b + self.f.read(self.CHUNK_SIZE)
if not b:
self.consumer.unregisterProducer()
self._onDone.callback(self)
self._onDone = self.f = self.consumer = None
else:
self.produce(b)
def pauseProducing(self):
pass
def stopProducing(self):
pass
def _size(self):
b = self.f.tell()
self.f.seek(0, 2)
e = self.f.tell()
self.f.seek(b, 0)
return e - b
def parseTime(s):
# XXX - This may require localization :(
months = [
'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
'nov', 'dec', 'january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november', 'december'
]
expr = {
'day': r"(?P<day>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'mon': r"(?P<mon>\w+)",
'year': r"(?P<year>\d\d\d\d)"
}
m = re.match('%(day)s-%(mon)s-%(year)s' % expr, s)
if not m:
raise ValueError, "Cannot parse time string %r" % (s,)
d = m.groupdict()
try:
d['mon'] = 1 + (months.index(d['mon'].lower()) % 12)
d['year'] = int(d['year'])
d['day'] = int(d['day'])
except ValueError:
raise ValueError, "Cannot parse time string %r" % (s,)
else:
return time.struct_time(
(d['year'], d['mon'], d['day'], 0, 0, 0, -1, -1, -1)
)
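# Example (illustrative): parseTime('03-Mar-2003') returns a
# time.struct_time for March 3rd, 2003 with the time fields zeroed.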
import codecs
def modified_base64(s):
s_utf7 = s.encode('utf-7')
return s_utf7[1:-1].replace('/', ',')
def modified_unbase64(s):
s_utf7 = '+' + s.replace(',', '/') + '-'
return s_utf7.decode('utf-7')
def encoder(s):
r = []
_in = []
for c in s:
if ord(c) in (range(0x20, 0x26) + range(0x27, 0x7f)):
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
del _in[:]
r.append(str(c))
elif c == '&':
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
del _in[:]
r.append('&-')
else:
_in.append(c)
if _in:
r.extend(['&', modified_base64(''.join(_in)), '-'])
return (''.join(r), len(s))
def decoder(s):
r = []
decode = []
for c in s:
if c == '&' and not decode:
decode.append('&')
elif c == '-' and decode:
if len(decode) == 1:
r.append('&')
else:
r.append(modified_unbase64(''.join(decode[1:])))
decode = []
elif decode:
decode.append(c)
else:
r.append(c)
if decode:
r.append(modified_unbase64(''.join(decode[1:])))
return (''.join(r), len(s))
class StreamReader(codecs.StreamReader):
def decode(self, s, errors='strict'):
return decoder(s)
class StreamWriter(codecs.StreamWriter):
    def encode(self, s, errors='strict'):
return encoder(s)
def imap4_utf_7(name):
if name == 'imap4-utf-7':
return (encoder, decoder, StreamReader, StreamWriter)
codecs.register(imap4_utf_7)
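# Example (illustrative): mailbox names use IMAP's modified UTF-7, where
# '&' becomes '&-' and non-ASCII runs become '&...-' modified base64:
#   u'Entw\xfcrfe'.encode('imap4-utf-7')  => 'Entw&APw-rfe'
#   'Entw&APw-rfe'.decode('imap4-utf-7')  => u'Entw\xfcrfe'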
__all__ = [
# Protocol classes
'IMAP4Server', 'IMAP4Client',
# Interfaces
'IMailboxListener', 'IClientAuthentication', 'IAccount', 'IMailbox',
'INamespacePresenter', 'ICloseableMailbox', 'IMailboxInfo',
'IMessage', 'IMessageCopier', 'IMessageFile', 'ISearchableMailbox',
# Exceptions
'IMAP4Exception', 'IllegalClientResponse', 'IllegalOperation',
'IllegalMailboxEncoding', 'UnhandledResponse', 'NegativeResponse',
'NoSupportedAuthentication', 'IllegalServerResponse',
'IllegalIdentifierError', 'IllegalQueryError', 'MismatchedNesting',
'MismatchedQuoting', 'MailboxException', 'MailboxCollision',
'NoSuchMailbox', 'ReadOnlyMailbox',
# Auth objects
'CramMD5ClientAuthenticator', 'PLAINAuthenticator', 'LOGINAuthenticator',
'PLAINCredentials', 'LOGINCredentials',
# Simple query interface
'Query', 'Not', 'Or',
# Miscellaneous
'MemoryAccount',
'statusRequestHelper',
]
|
bsd-3-clause
| -8,669,594,478,378,295,000
| 33.044453
| 125
| 0.570752
| false
| 4.179739
| false
| false
| false
|
BeetleChunks/redsails
|
rsCrypto/rsCrypto.py
|
1
|
2008
|
import base64
import ctypes
import sys
from Crypto.Cipher import AES
from Crypto import Random
from pbkdf2 import PBKDF2
class iv:
def __init__(self, bs):
self.bs = bs
self.usedIVs = set()
self.initializeIV()
def initializeIV(self):
self.value = Random.new().read(self.bs)
self.usedIVs.add(self.value)
def setNextIV(self):
self.value = Random.new().read(self.bs)
if self.value in self.usedIVs:
self.setNextIV()
else:
self.usedIVs.add(self.value)
class AESCipher:
def __init__(self, key):
        # I'll implement this later -_-
#self.keyDerivation = PBKDF2(key, os.urandom(8)).read(32)
self.keyDerivation = PBKDF2(key, "DEADLIST").read(32)
self.iv = iv(AES.block_size)
self.tryToKillPasswordInMemory(key)
def encrypt(self, payload):
payload = self.addPadding(payload)
cipher = AES.new(self.keyDerivation, AES.MODE_CBC, self.iv.value)
cipherText = base64.b64encode(self.iv.value + cipher.encrypt(payload))
self.iv.setNextIV()
return cipherText
def decrypt(self, payload):
cipherText = base64.b64decode(payload)
iv = cipherText[:16]
cipher = AES.new(self.keyDerivation, AES.MODE_CBC, iv)
paddedText = cipher.decrypt(cipherText[16:])
plainText = self.subPadding(paddedText)
return plainText
    # TODO: Use a pseudo-random int for chr() for more padding entropy
def addPadding(self, payload):
length = 16 - (len(payload) % 16)
payload += chr(length)*length
return payload
    def subPadding(self, payload):
        # Strip exactly the number of padding bytes recorded in the final
        # byte; rstrip() could also eat legitimate trailing data bytes that
        # happen to equal the pad character.
        return payload[:-ord(payload[-1])]
# Great example of why I like C more than Python...
def tryToKillPasswordInMemory(self, keyToDelete):
tmpStrToGetHeader = "This is a temp string to get header"
header = ctypes.string_at(id(tmpStrToGetHeader), sys.getsizeof(tmpStrToGetHeader)).find(tmpStrToGetHeader)
location = id(keyToDelete) + header
size = sys.getsizeof(keyToDelete) - header
        memset = ctypes.cdll.msvcrt.memset  # Windows-only: relies on msvcrt
memset(location, 0, size)
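# Example usage (illustrative):
#   cipher = AESCipher("correct horse battery staple")
#   token = cipher.encrypt("attack at dawn")
#   assert cipher.decrypt(token) == "attack at dawn"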
|
gpl-3.0
| 5,536,739,678,931,007,000
| 24.8
| 108
| 0.693227
| false
| 2.983655
| false
| false
| false
|
riccardofreixo/ansible_ec2_elb_healthcheck
|
ec2_elb_healthcheck/ec2_elb_healthcheck.py
|
1
|
4619
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Riccardo Freixo
"""
Simple Ansible module to health check instances in an ELB
"""
DOCUMENTATION = '''
---
module: ec2_elb_healthcheck
version_added: "1.8"
short_description: Get instance Health Check state from ELBs
description:
- Gets instance Health Check states from ELBs.
author: Riccardo Freixo
options:
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
name:
description:
- The name of the ELB.
required: true
instances:
description:
- A list of instance IDs to get Health Check states from.
required: false
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Check health of two instances attached to elb myelb
- ec2_elb_healthcheck:
region: eu-west-1
name: my-elb
instances:
- i-1157af42
- i-b514da21
# Check health of all instances attached to elb myelb
- ec2_elb_healthcheck:
region: eu-west-1
name: my-elb
'''
import sys
try:
import boto
import boto.ec2
import boto.ec2.elb
import boto.ec2.elb.attributes
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
def check_instances_health(connection, elb, ids):
"""
Returns a dict with the state of each instance in 'ids'.
:type connection: :class:`boto.ec2.connection.EC2Connection`
:param connection: a connection to ec2
:type elb: str
:param elb: the name of the ELB to health check.
:type ids: list
:param ids: a list of instance IDs to health check.
:rtype: dict
:return: Returns a dict with the state of each instance in 'ids'.
"""
try:
instances = connection.describe_instance_health(elb)
except boto.exception.EC2ResponseError, error:
module.fail_json(msg=str(error))
healthcheck = {instance.instance_id: instance.state for instance in instances if instance.instance_id in ids}
for instance_not_found in set(ids) - set(healthcheck.keys()):
healthcheck[instance_not_found] = 'NotFound'
instances_in_service = [k for k, v in healthcheck.iteritems() if v == 'InService']
    all_in_service = len(instances_in_service) == len(ids)
return dict(
all_in_service=all_in_service,
instances=healthcheck
)
def check_all_instances_health(module, connection, elb):
    """
    Returns a dict with the state of each instance attached to the ELB 'elb'.
    :type module: :class:`ansible.module_utils.basic.AnsibleModule`
    :param module: the Ansible module, needed to fail the task on AWS errors
    :type connection: :class:`boto.ec2.connection.EC2Connection`
:param connection: a connection to ec2
:type elb: str
:param elb: the name of the ELB to health check.
:rtype: dict
:return: Returns a dict with the state of each instance attached to the ELB 'elb'.
"""
try:
instances = connection.describe_instance_health(elb)
except boto.exception.EC2ResponseError, error:
module.fail_json(msg=str(error))
healthcheck = {instance.instance_id: instance.state for instance in instances}
instances_in_service = [k for k, v in healthcheck.iteritems() if v == 'InService']
    all_in_service = len(instances_in_service) == len(instances)
return dict(
all_in_service=all_in_service,
instances=healthcheck
)
def main():
"""Main function"""
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
instances=dict(type='list')
),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, error:
module.fail_json(msg=str(error))
name = module.params.get('name')
instances = module.params.get('instances')
    # pass the module in so the helpers can fail_json on AWS errors
    if instances is not None:
        results = check_instances_health(module, connection, name, ids=instances)
    else:
        results = check_all_instances_health(module, connection, name)
module.exit_json(
changed=False,
**results
)
if __name__ == "__main__":
main()
|
gpl-3.0
| 7,849,643,962,396,327,000
| 27.337423
| 119
| 0.658584
| false
| 3.725
| false
| false
| false
|
nsdont/gogs_ci_demo
|
superlists/superlists/settings.py
|
1
|
2664
|
"""
Django settings for superlists project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_@3#)mt6d@mk73wnt-rq47@0d5*fe5kqmshd_-nj*^43d92!rz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lists',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'superlists.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'superlists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
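# Hedged sketch: swapping in PostgreSQL would only change the block above
# (engine name as spelled in Django 1.8; credentials are placeholders):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'superlists',
#         'USER': 'superlists',
#         'PASSWORD': 'change-me',
#         'HOST': 'localhost',
#     }
# }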
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
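# Hedged deployment sketch: collectstatic needs a target directory, so a
# production settings file would typically also define (path illustrative):
# STATIC_ROOT = os.path.join(BASE_DIR, 'static')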
|
mit
| 6,567,312,469,750,804,000
| 24.615385
| 71
| 0.691066
| false
| 3.482353
| false
| false
| false
|
pchmieli/h2o-3
|
h2o-py/tests/testdir_jira/pyunit_hexdev_296_confusion_matrices.py
|
1
|
1271
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def confusion_matrices_check():
local_data = [[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[1, 'a'],[0, 'b'],
[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b'],[0, 'b']]
h2o_data = h2o.H2OFrame(python_obj=zip(*local_data))
h2o_data.set_names(['response', 'predictor'])
h2o_data.show()
gbm = h2o.gbm(x=h2o_data[1:], y=h2o_data["response"].asfactor(), ntrees=1, distribution="bernoulli")
gbm.show()
perf = gbm.model_performance()
tps = perf.metric("tps", [perf.find_threshold_by_max_metric("f1")])[0][1]
tns = perf.metric("tns", [perf.find_threshold_by_max_metric("f1")])[0][1]
fps = perf.metric("fps", [perf.find_threshold_by_max_metric("f1")])[0][1]
fns = perf.metric("fns", [perf.find_threshold_by_max_metric("f1")])[0][1]
assert tps + tns + fps + fns == 20, "incorrect confusion matrix computation: tps: {0}, fps: {1}, tns: {2}, fns: " \
"{3}. Should sum to 20.".format(tps, fps, tns, fns)
if __name__ == "__main__":
pyunit_utils.standalone_test(confusion_matrices_check)
else:
confusion_matrices_check()
|
apache-2.0
| -4,116,032,665,391,225,000
| 36.382353
| 119
| 0.543666
| false
| 2.526839
| false
| false
| false
|