repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
wiki-ai/revscoring | refs/heads/master | examples/language_support.py | 3 | from revscoring.datasources.revision_oriented import revision
from revscoring.dependencies import solve
from revscoring.languages import english, spanish

# Score the same revision text against two language feature sets.
features = [english.informals.revision.matches,
            spanish.informals.revision.matches]
# solve() resolves the feature dependency graph; seeding the cache with
# revision.text means no API/network lookup is needed.
values = solve(features, cache={revision.text: "I think it is stupid."})
# Print one "<feature>: <value>" line per feature.
for feature, value in zip(features, values):
    print("\t{0}: {1}".format(feature, repr(value)))
|
zhangziang/django-allauth | refs/heads/master | allauth/socialaccount/providers/spotify/views.py | 66 | from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
import requests
from .provider import SpotifyOAuth2Provider
class SpotifyOAuth2Adapter(OAuth2Adapter):
    """OAuth2 adapter wiring django-allauth to Spotify's Accounts service."""
    provider_id = SpotifyOAuth2Provider.id
    # Spotify OAuth2 endpoints.
    access_token_url = 'https://accounts.spotify.com/api/token'
    authorize_url = 'https://accounts.spotify.com/authorize'
    profile_url = 'https://api.spotify.com/v1/me'

    def complete_login(self, request, app, token, **kwargs):
        """Fetch the user's Spotify profile and build a SocialLogin from it.

        NOTE(review): the access token is sent as a query parameter;
        Spotify also accepts an "Authorization: Bearer" header -- confirm
        which the current API requires.  The response status is never
        checked, so a failed request surfaces as a JSON decode error.
        """
        extra_data = requests.get(self.profile_url, params={
            'access_token': token.token
        })
        return self.get_provider().sociallogin_from_response(
            request,
            extra_data.json()
        )
# Module-level view callables consumed by allauth's URL configuration.
oauth_login = OAuth2LoginView.adapter_view(SpotifyOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(SpotifyOAuth2Adapter)
|
google/google-ctf | refs/heads/master | third_party/edk2/AppPkg/Applications/Python/Python-2.7.10/Lib/encodings/cp500.py | 93 | """ Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp500 codec backed by the charmap tables in this module."""

    def encode(self,input,errors='strict'):
        # Map each unicode character to its cp500 byte via encoding_table.
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        # Map each cp500 byte back to unicode via decoding_table.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is stateless, so `final` can be ignored;
        # [0] drops the length-consumed element of the result tuple.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding is stateless, so `final` can be ignored;
        # [0] drops the length-consumed element of the result tuple.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # No extra state needed; Codec supplies encode().
    pass
class StreamReader(Codec,codecs.StreamReader):
    # No extra state needed; Codec supplies decode().
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used by the encodings-package registry."""
    return codecs.CodecInfo(
        name='cp500',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
# Inverse mapping (unicode -> byte) derived from decoding_table above.
encoding_table=codecs.charmap_build(decoding_table)
|
astooke/gtimer | refs/heads/master | docs/source/examples/loop_3.py | 1 | import gtimer as gt
import time

# Stamp a point before entering the timed loop.
time.sleep(0.1)
gt.stamp('first')
# timed_loop() returns an iterator-like handle; each next() call marks
# the start of a new timed iteration.
loop = gt.timed_loop('named_loop')
x = 0
while x < 3:
    next(loop)          # advance the timed-loop iteration counter
    time.sleep(0.1)
    x += 1
    gt.stamp('loop')    # per-iteration stamp inside the loop
loop.exit()             # close the loop region explicitly
time.sleep(0.1)
gt.stamp('second')
# Python 2 print statement -- this example predates Python 3.
print gt.report(include_itrs=False)
phoebusliang/parallel-lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/tests/modeltests/defer/tests.py | 92 | from django.db.models.query_utils import DeferredAttribute
from django.test import TestCase
from models import Secondary, Primary, Child, BigChild
class DeferTests(TestCase):
    """Tests for QuerySet.defer()/only() deferred-field loading."""

    def assert_delayed(self, obj, num):
        """Assert that exactly `num` fields on `obj` are deferred."""
        count = 0
        for field in obj._meta.fields:
            # A deferred field shows up as a DeferredAttribute descriptor
            # on the instance's dynamically built class.
            if isinstance(obj.__class__.__dict__.get(field.attname),
                DeferredAttribute):
                count += 1
        self.assertEqual(count, num)

    def test_defer(self):
        # To all outward appearances, instances with deferred fields look the
        # same as normal instances when we examine attribute values. Therefore
        # we test for the number of deferred fields on returned instances (by
        # poking at the internals), as a way to observe what is going on.
        s1 = Secondary.objects.create(first="x1", second="y1")
        p1 = Primary.objects.create(name="p1", value="xx", related=s1)
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name")[0], 1)
        self.assert_delayed(qs.only("name")[0], 2)
        self.assert_delayed(qs.defer("related__first")[0], 0)
        obj = qs.select_related().only("related__first")[0]
        self.assert_delayed(obj, 2)
        self.assertEqual(obj.related_id, s1.pk)
        # defer()/only() compose with extra() in either order.
        self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
        self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)
        self.assert_delayed(qs.defer("name").defer("value")[0], 2)
        self.assert_delayed(qs.only("name").only("value")[0], 2)
        self.assert_delayed(qs.only("name").defer("value")[0], 2)
        self.assert_delayed(qs.only("name", "value").defer("value")[0], 2)
        self.assert_delayed(qs.defer("name").only("value")[0], 2)
        # NOTE(review): this result is never used -- possibly meant to feed
        # an assert_delayed() check; confirm or remove.
        obj = qs.only()[0]
        # defer(None) clears all deferrals.
        self.assert_delayed(qs.defer(None)[0], 0)
        self.assert_delayed(qs.only("name").defer(None)[0], 0)
        # User values() won't defer anything (you get the full list of
        # dictionaries back), but it still works.
        self.assertEqual(qs.defer("name").values()[0], {
            "id": p1.id,
            "name": "p1",
            "value": "xx",
            "related_id": s1.id,
        })
        self.assertEqual(qs.only("name").values()[0], {
            "id": p1.id,
            "name": "p1",
            "value": "xx",
            "related_id": s1.id,
        })
        # Using defer() and only() with get() is also valid.
        self.assert_delayed(qs.defer("name").get(pk=p1.pk), 1)
        self.assert_delayed(qs.only("name").get(pk=p1.pk), 2)
        # DOES THIS WORK?
        self.assert_delayed(qs.only("name").select_related("related")[0], 1)
        self.assert_delayed(qs.defer("related").select_related("related")[0], 0)
        # Saving models with deferred fields is possible (but inefficient,
        # since every field has to be retrieved first).
        obj = Primary.objects.defer("value").get(name="p1")
        obj.name = "a new name"
        obj.save()
        self.assertQuerysetEqual(
            Primary.objects.all(), [
                "a new name",
            ],
            lambda p: p.name
        )
        # Regression for #10572 - A subclass with no extra fields can defer
        # fields from the base class
        Child.objects.create(name="c1", value="foo", related=s1)
        # You can defer a field on a baseclass when the subclass has no fields
        obj = Child.objects.defer("value").get(name="c1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "c1")
        self.assertEqual(obj.value, "foo")
        obj.name = "c2"
        obj.save()
        # You can retrive a single column on a base class with no fields
        obj = Child.objects.only("name").get(name="c2")
        self.assert_delayed(obj, 3)
        self.assertEqual(obj.name, "c2")
        self.assertEqual(obj.value, "foo")
        obj.name = "cc"
        obj.save()
        BigChild.objects.create(name="b1", value="foo", related=s1, other="bar")
        # You can defer a field on a baseclass
        obj = BigChild.objects.defer("value").get(name="b1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")
        obj.name = "b2"
        obj.save()
        # You can defer a field on a subclass
        obj = BigChild.objects.defer("other").get(name="b2")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "b2")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")
        obj.name = "b3"
        obj.save()
        # You can retrieve a single field on a baseclass
        obj = BigChild.objects.only("name").get(name="b3")
        self.assert_delayed(obj, 4)
        self.assertEqual(obj.name, "b3")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")
        obj.name = "b4"
        obj.save()
        # You can retrieve a single field on a baseclass
        obj = BigChild.objects.only("other").get(name="b4")
        self.assert_delayed(obj, 4)
        self.assertEqual(obj.name, "b4")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")
        obj.name = "bb"
        obj.save()
|
40123142/finalexam | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/signal.py | 743 | """This module provides mechanisms to use signal handlers in Python.
Functions:
alarm() -- cause SIGALRM after a specified time [Unix only]
setitimer() -- cause a signal (described below) after a specified
float time and the timer may restart then [Unix only]
getitimer() -- get current value of timer [Unix only]
signal() -- set the action for a given signal
getsignal() -- get the signal action for a given signal
pause() -- wait until a signal arrives [Unix only]
default_int_handler() -- default SIGINT handler
signal constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
itimer constants:
ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon
expiration
ITIMER_VIRTUAL -- decrements only when the process is executing,
and delivers SIGVTALRM upon expiration
ITIMER_PROF -- decrements both when the process is executing and
when the system is executing on behalf of the process.
Coupled with ITIMER_VIRTUAL, this timer is usually
used to profile the time spent by the application
in user and kernel space. SIGPROF is delivered upon
expiration.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame."""
# Signal numbers and handler sentinels, mirroring CPython's win32
# signal module values so code importing them still runs under Brython.
CTRL_BREAK_EVENT=1
CTRL_C_EVENT=0
NSIG=23
SIGABRT=22
SIGBREAK=21
SIGFPE=8
SIGILL=4
SIGINT=2
SIGSEGV=11
SIGTERM=15
SIG_DFL=0
SIG_IGN=1
def signal(signalnum, handler) :
    # Intentional no-op stub: Brython runs in the browser, where OS
    # signal delivery does not exist; the handler is accepted and ignored.
    pass
|
thomaxxl/safrs | refs/heads/master | expose_existing/sqlacodegen/setup.py | 1 | from setuptools import setup
# Minimal packaging shim: the version comes from SCM tags via
# setuptools_scm ("post-release" scheme, "-dirty" local tag); all other
# metadata is expected to live in setup.cfg.
setup(
    use_scm_version={"version_scheme": "post-release", "local_scheme": "dirty-tag"},
    setup_requires=["setuptools >= 36.2.7", "setuptools_scm >= 1.7.0"],
)
|
robertding/vo | refs/heads/master | test/test_download.py | 1 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : RobertDing
# E-mail : robertdingx@gmail.com
# Date : 15/08/29 19:49:17
# Desc : 测试下载
#
from __future__ import absolute_import, division, with_statement
|
zenodo/invenio | refs/heads/zenodo-master | invenio/modules/tickets/testsuite/__init__.py | 12133432 | |
NelleV/pyconfr-test | refs/heads/master | symposion/models.py | 12133432 | |
Reality9/spiderfoot | refs/heads/master | ext/metapdf/__init__.py | 9 | from metapdf import MetaPdfReader
# NOTE(review): __all__ exposes the submodule name, not the imported
# MetaPdfReader class -- confirm whether star-imports are meant to
# re-export the class instead.
__all__ = ["metapdf"]
|
ofer43211/unisubs | refs/heads/staging | apps/webdriver_testing/check_videos/test_performance.py | 5 | import os
from webdriver_testing.webdriver_base import WebdriverTestCase
from webdriver_testing.pages.site_pages import video_language_page
from webdriver_testing.pages.site_pages import editor_page
from webdriver_testing import data_helpers
from utils.factories import *
class TestCaseEditUploaded(WebdriverTestCase):
    """TestSuite large subtitle sets """
    # Reuse one browser session for the whole class (faster suite).
    NEW_BROWSER_PER_TEST_CASE = False

    @classmethod
    def setUpClass(cls):
        """Create a test user, page objects and the subtitle fixture path."""
        super(TestCaseEditUploaded, cls).setUpClass()
        cls.data_utils = data_helpers.DataHelpers()
        cls.user = UserFactory()
        cls.video_language_pg = video_language_page.VideoLanguagePage(cls)
        cls.editor_pg = editor_page.EditorPage(cls)
        cls.subs_data_dir = os.path.join(os.getcwd(), 'apps',
            'webdriver_testing', 'subtitle_data')

    def test_edit_large(self):
        """Upload a large set of subtitles then open for editing. """
        video = VideoFactory()
        # English upload is the reference; French is an incomplete
        # translation derived from the English tip revision.
        data = {'language_code': 'en',
                'video': video,
                'subtitles': ('apps/webdriver_testing/subtitle_data/'
                    'How-to.en.srt'),
               }
        r = self.data_utils.add_subs(**data)
        fr_data = {'language_code': 'fr',
                   'video': video,
                   'complete': False,
                   'parents': [video.subtitle_language('en').get_tip()],
                   'subtitles': ('apps/webdriver_testing/subtitle_data/'
                       'srt-full.srt')
                  }
        r = self.data_utils.add_subs(**fr_data)
        self.video_language_pg.open_video_lang_page(video.video_id, 'fr')
        self.video_language_pg.log_in(self.user.username, 'password')
        self.editor_pg.open_editor_page(video.video_id, 'fr')
        # Working pane shows the French lines, reference pane the English.
        self.assertEqual(10, len(self.editor_pg.working_text()))
        self.assertEqual(1194, len(self.editor_pg.reference_text()))
|
redglasses/android_kernel_lge_g3-V20f | refs/heads/cm-12.0 | scripts/rt-tester/rt-tester.py | 11005 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
    """Print command-line help for this (Python 2) test driver."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # NOTE: the parameter shadows the builtin `str`; harmless here
    # since nothing else is done inside the function.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one sysfs status value against a test opcode.

    val -- raw status value string read from the rttest status file
    top -- opcode triple [field-letter, comparator, fixed-argument]
    arg -- argument column of the test line
    Returns 1 when the comparison holds, else 0.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state: arg selects which decimal digit to extract
        # (Python 2 int division floors here).
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode check: translate symbolic command names to numbers.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
Backspace-Dev/x920d-jp | refs/heads/master | scripts/rt-tester/rt-tester.py | 11005 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
    """Print command-line help for this (Python 2) test driver."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # NOTE: the parameter shadows the builtin `str`; harmless here
    # since nothing else is done inside the function.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one sysfs status value against a test opcode.

    val -- raw status value string read from the rttest status file
    top -- opcode triple [field-letter, comparator, fixed-argument]
    arg -- argument column of the test line
    Returns 1 when the comparison holds, else 0.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state: arg selects which decimal digit to extract
        # (Python 2 int division floors here).
        intval = intval / (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode check: translate symbolic command names to numbers.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
nens/nxt-box | refs/heads/master | nxt_box/views.py | 1 | # (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from django.utils.translation import ugettext as _
# from django.core.urlresolvers import reverse
# from lizard_map.views import MapView
# from lizard_ui.views import UiView
# from nxt_box import models
# class TodoView(UiView):
# """Simple view without a map."""
# template_name = 'nxt_box/todo.html'
# page_title = _('TODO view')
# class Todo2View(MapView):
# """Simple view with a map."""
# template_name = 'nxt_box/todo2.html'
# page_title = _('TODO 2 view')
|
ntoll/p4p2p | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
from setuptools import setup, find_packages
from p4p2p.version import get_version
# Package metadata for p4p2p.
# NOTE(review): the two open(...).read() calls rely on the garbage
# collector to close the file handles -- acceptable in a setup script,
# but worth confirming the files exist in every build context.
setup(
    name='p4p2p',
    version=get_version(),  # single-sourced from p4p2p/version.py
    description='A platform for peer-to-peer application development.',
    long_description=open('README.rst').read(),
    author=open('AUTHORS').read(),
    author_email='info@p4p2p.net',
    url='http://p4p2p.net/',
    # Code lives under the p4p2p/ directory rather than the repo root.
    package_dir={'': 'p4p2p'},
    packages=find_packages('p4p2p'),
    license='MIT',
    classifiers=[
        'Development Status :: 1 - Planning',
        'Environment :: No Input/Output (Daemon)',
        'Framework :: Twisted',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: MIT License',
        'Topic :: Communications',
        'Topic :: Internet',
        'Topic :: System :: Distributed Computing',
    ],
    install_requires=['twisted', ]
)
|
sjev/trading-with-python | refs/heads/master | lib/cboe.py | 1 | # -*- coding: utf-8 -*-
"""
toolset working with cboe data
@author: Jev Kuznetsov
Licence: BSD
"""
from datetime import datetime, date
import urllib.request, urllib.error, urllib.parse
from pandas import DataFrame, Index
from pandas.core import datetools
import numpy as np
import pandas as pd
def monthCode(month):
    """
    Convert between a futures month number and its letter code.

    Input: month number (int, 1-12) or single-letter code (str)
    Returns: the corresponding code (str) or month number (int)
    Raises: ValueError for any other input type
    """
    # Standard futures month codes, January ('F') through December ('Z').
    letters = tuple('FGHJKMNQUVXZ')
    if isinstance(month, int):
        # number -> letter
        return letters[month - 1]
    if isinstance(month, str):
        # letter -> number
        return letters.index(month) + 1
    raise ValueError('Function accepts int or str')
def vixExpiration(year, month):
    """
    Return the expiration (settlement) date of a VX future.

    The settlement date is 30 days before the third Friday of the month
    *following* the contract month -- exactly what the original computed
    with pandas date offsets.

    Parameters:
        year, month -- contract year and month (month is 1-12)
    Returns:
        datetime of the settlement date
    """
    # local import: the module header only pulls in datetime/date
    from datetime import timedelta
    # BUG FIX: pandas.core.datetools was removed from pandas years ago,
    # so the original implementation no longer imports.  The same date
    # arithmetic is done here with the standard library.
    # first calendar day of the month following the contract month
    if month == 12:
        t = datetime(year + 1, 1, 1)
    else:
        t = datetime(year, month + 1, 1)
    # roll forward to the first Friday (weekday 4), then two more weeks
    # to land on the third Friday of that month
    third_friday = t + timedelta(days=(4 - t.weekday()) % 7 + 14)
    return third_friday - timedelta(days=30)
def getPutCallRatio():
    """ download current Put/Call ratio

    Fetches CBOE's daily total put/call CSV and returns it as a
    DataFrame indexed by date.

    Raises:
        whatever urllib raises on a failed download.
    """
    urlStr = 'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/totalpc.csv'
    try:
        data = urllib.request.urlopen(urlStr)
    except Exception as e:
        # BUG FIX: the original printed the error and fell through to
        # pd.read_csv with ``data`` never assigned, raising a confusing
        # NameError.  Report and re-raise instead.
        print("Failed to download:\n{0}".format(e))
        raise
    headerLine = 2  # the file has two preamble lines before the column header
    return pd.read_csv(data, header=headerLine, index_col=0, parse_dates=True)
def getHistoricData(symbols = ['VIX','VXV','VXMT','VVIX']):
    ''' get historic data from CBOE

    Downloads the daily history of the requested volatility indices from
    cboe.com and combines the closing values into one DataFrame.

    Parameters:
        symbols : str or list of str -- any of 'VIX', 'VXV', 'VXMT',
            'VVIX'; a single string is accepted and wrapped in a list.

    Returns:
        pandas.DataFrame indexed by date, one column per symbol.

    NOTE(review): the mutable default list is harmless here because it is
    only rebound, never mutated.  The hard-coded cboe.com URLs may have
    moved since this was written -- verify before relying on them.
    '''
    if not isinstance(symbols,list):
        symbols = [symbols]
    # per-symbol download location
    urls = {'VIX':'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/vixcurrent.csv',
            'VXV':'http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vxvdailyprices.csv',
            'VXMT':'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/vxmtdailyprices.csv',
            'VVIX':'http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/VVIXtimeseries.csv'}
    # number of preamble lines before the header row in each file
    startLines = {'VIX':1,'VXV':2,'VXMT':2,'VVIX':1}
    # name of the closing-price column in each file
    cols = {'VIX':'VIX Close','VXV':'CLOSE','VXMT':'Close','VVIX':'VVIX'}
    data = {}
    for symbol in symbols:
        urlStr = urls[symbol]
        print('Downloading %s from %s' % (symbol,urlStr))
        # keep only the closing-price column, indexed by parsed dates
        data[symbol] = pd.read_csv(urllib.request.urlopen(urlStr), header=startLines[symbol],index_col=0,parse_dates=True)[cols[symbol]]
    return pd.DataFrame(data)
#---------------------classes--------------------------------------------
class VixFuture(object):
    """
    Convenience wrapper for a single VX (VIX) futures contract.
    """
    def __init__(self, year, month):
        # contract year and delivery month (1-12)
        self.year = year
        self.month = month
    def expirationDate(self):
        """Settlement date of this contract (delegates to vixExpiration)."""
        return vixExpiration(self.year, self.month)
    def daysLeft(self, date):
        """ business days to expiration date """
        # BUG FIX: pandas.DateRange was removed from pandas (the original
        # in-line comment already flagged the deprecation); bdate_range is
        # the modern business-day range and gives the same inclusive count.
        r = pd.bdate_range(date, self.expirationDate())
        return len(r)
    def __repr__(self):
        # NOTE(review): "Exprires" typo kept verbatim for output compatibility
        return 'VX future [%i-%i %s] Exprires: %s' % (self.year, self.month, monthCode(self.month),
                                                      self.expirationDate())
#-------------------test functions---------------------------------------
def testDownload():
    """Smoke test: download the VIX and VXV histories, then plot each."""
    frames = [getHistoricData(sym) for sym in ('VIX', 'VXV')]
    for ts in frames:
        ts.plot()
def testExpiration():
    """Print the 2011 settlement date for every contract month."""
    for m in range(1, 13):
        print(vixExpiration(2011, m).strftime("%B, %d %Y (%A)"))
if __name__ == '__main__':
    # ad-hoc smoke test: build the Nov-2011 VX contract, show its repr and
    # how many business days remained as of 2011-11-10
    #testExpiration()
    v = VixFuture(2011,11)
    print(v)
    print(v.daysLeft(datetime(2011,11,10)))
|
CulturaEduca/portal | refs/heads/master | blog/models.py | 1 | # -*- coding: utf-8 -*-
from django.db import models
from django.template.defaultfilters import slugify
from django.contrib import admin
import datetime
class Autor(models.Model):
    '''Blog author: full name plus optional contact details and bio.'''
    nome = models.CharField('Nome Completo', max_length=200)
    email = models.EmailField('Email do Autor', null=True, blank=True)
    site = models.CharField('Site ou URL externa', max_length=200, null=True, blank=True)
    descricao = models.TextField('Descricao do Autor', null=True, blank=True)
    def __unicode__(self):
        # shown wherever the object is rendered as text (e.g. the admin)
        return self.nome
    class Meta:
        verbose_name = 'Autor'
        verbose_name_plural = 'Autores'
class Categoria(models.Model):
    '''Content separator: a simple named category for posts.'''
    titulo = models.CharField('Titulo da Categoria', max_length=100)
    def __unicode__(self):
        return self.titulo
    class Meta:
        verbose_name = 'Categoria'
        verbose_name_plural = 'Categorias'
class Tag(models.Model):
    '''Tag attached to posts; the slug is derived from the name.'''
    nome = models.CharField('Tag', max_length=100)
    slug = models.SlugField('Slug Tag', null=True, blank=True, unique=True)
    def save(self, *args, **kwargs):
        # BUG FIX: the override previously took no arguments, so Django
        # calls like save(using=...) or save(update_fields=...) failed.
        # Accept and forward everything; the slug is kept in sync with
        # the name on every save, as before.
        self.slug = slugify(self.nome)
        super(Tag, self).save(*args, **kwargs)
    def __unicode__(self):
        return self.nome
    class Meta:
        verbose_name = 'Tag'
        verbose_name_plural = 'Tags'
class Post(models.Model):
    '''Blog post; slug is regenerated from the title on every save.'''
    categoria = models.ForeignKey(Categoria)
    titulo = models.CharField(u'Título', max_length=100)
    conteudo = models.TextField(u'Conteúdo')
    autor = models.ForeignKey(Autor)
    resumo = models.CharField(u'Resumo do conteúdo', null=True, blank=True, help_text='Se o resumo estiver em branco, os 160 caracteres iniciais do post será exibido', max_length=200)
    tags = models.ManyToManyField(Tag)
    imagem_destaque = models.ImageField(upload_to='destaque', help_text='Imagem de Destaque, se não tiver, aparece padrão', null=True, blank=True)
    ativo = models.BooleanField('Post Ativo', help_text = 'Marque para deixar ativo, desmarcado = rascunho')
    destaque = models.BooleanField('Destaque', help_text = 'Marque para deixar em destaque')
    # BUG FIX: pass the callable, not its result.  The previous
    # default=datetime.datetime.now() was evaluated once at import time,
    # stamping every post with the server start-up time.
    data_publicacao = models.DateTimeField(u'Data da publicação', default=datetime.datetime.now)
    slug = models.SlugField('URL', null=True, blank=True, editable=False, unique=True, max_length=255)
    contador_leitura = models.IntegerField('Contador de Views', default=0, editable=False)
    def save(self, *args, **kwargs):
        # BUG FIX: forward *args/**kwargs so Django's save(using=...) /
        # save(update_fields=...) calls keep working.
        self.slug = slugify(self.titulo)
        super(Post, self).save(*args, **kwargs)
    def get_absolute_url(self):
        return '/blog/%s/' % self.slug
    def __unicode__(self):
        return self.titulo
    class Meta:
        verbose_name = 'Post'
        verbose_name_plural = 'Posts'
class Comentario(models.Model):
    '''User-submitted comment on a Post, subject to moderation.'''
    post = models.ForeignKey(Post)
    titulo = models.CharField('Titulo do comentario', max_length=255, null=True, blank=True)
    comentario = models.TextField('Comentario')
    nome = models.CharField('Nome Usuario', max_length=200)
    email = models.EmailField('Email do Usuario')
    ip = models.IPAddressField('Ip do Usuario')
    moderado = models.BooleanField('Moderar comentario')
    # BUG FIX: pass the callable instead of calling it.  The previous
    # default=datetime.datetime.now() was evaluated once at import time,
    # so every comment received the server start-up timestamp.
    data_hora = models.DateTimeField('Data/Hora', default=datetime.datetime.now)
    def __unicode__(self):
        return self.nome
    class Meta:
        verbose_name = 'Comentário'
        verbose_name_plural = 'Comentários'
class PostAdmin(admin.ModelAdmin):
    # Admin configuration for Post: searchable by title/body, filterable
    # by category and status flags, with TinyMCE loaded for rich editing.
    list_display = ('titulo','categoria','data_publicacao','slug','destaque','ativo','contador_leitura',)
    search_fields = ('titulo','conteudo',)
    list_filter = ('categoria','ativo','destaque',)
    class Media:
        # extra JS injected into the admin change form (rich-text editor)
        js = [
            '/static/grappelli/tinymce/jscripts/tiny_mce/tiny_mce.js',
            '/static/js/tinymce_setup.js',
        ]
class ComentarioAdmin(admin.ModelAdmin):
    # Admin configuration for Comentario; the 'moderado' filter lets
    # staff find comments awaiting moderation.
    list_display = ('post','nome','email','ip','data_hora','comentario','moderado',)
    search_fields = ('nome',)
    list_filter = ('moderado',)
# Register the models with the Django admin; Post uses a customised
# ModelAdmin, the rest take the defaults.
admin.site.register(Post, PostAdmin)
admin.site.register(Categoria)
admin.site.register(Tag)
admin.site.register(Autor)
admin.site.register(Comentario, ComentarioAdmin) |
takahashiminoru/ryu | refs/heads/master | ryu/tests/unit/packet/test_igmp.py | 23 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import inspect
import logging
import six
from struct import pack, unpack_from, pack_into
from nose.tools import ok_, eq_, raises
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.ipv4 import ipv4
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.packet_utils import checksum
from ryu.lib import addrconv
from ryu.lib.packet.igmp import igmp
from ryu.lib.packet.igmp import igmpv3_query
from ryu.lib.packet.igmp import igmpv3_report
from ryu.lib.packet.igmp import igmpv3_report_group
from ryu.lib.packet.igmp import IGMP_TYPE_QUERY
from ryu.lib.packet.igmp import IGMP_TYPE_REPORT_V3
from ryu.lib.packet.igmp import MODE_IS_INCLUDE
LOG = logging.getLogger(__name__)
class Test_igmp(unittest.TestCase):
""" Test case for Internet Group Management Protocol
"""
def setUp(self):
self.msgtype = IGMP_TYPE_QUERY
self.maxresp = 100
self.csum = 0
self.address = '225.0.0.1'
self.buf = pack(igmp._PACK_STR, self.msgtype, self.maxresp,
self.csum,
addrconv.ipv4.text_to_bin(self.address))
self.g = igmp(self.msgtype, self.maxresp, self.csum,
self.address)
def tearDown(self):
pass
def find_protocol(self, pkt, name):
for p in pkt.protocols:
if p.protocol_name == name:
return p
def test_init(self):
eq_(self.msgtype, self.g.msgtype)
eq_(self.maxresp, self.g.maxresp)
eq_(self.csum, self.g.csum)
eq_(self.address, self.g.address)
def test_parser(self):
_res = self.g.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(res.msgtype, self.msgtype)
eq_(res.maxresp, self.maxresp)
eq_(res.csum, self.csum)
eq_(res.address, self.address)
def test_serialize(self):
data = bytearray()
prev = None
buf = self.g.serialize(data, prev)
res = unpack_from(igmp._PACK_STR, six.binary_type(buf))
eq_(res[0], self.msgtype)
eq_(res[1], self.maxresp)
eq_(res[2], checksum(self.buf))
eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
def _build_igmp(self):
dl_dst = '11:22:33:44:55:66'
dl_src = 'aa:bb:cc:dd:ee:ff'
dl_type = ether.ETH_TYPE_IP
e = ethernet(dl_dst, dl_src, dl_type)
total_length = 20 + igmp._MIN_LEN
nw_proto = inet.IPPROTO_IGMP
nw_dst = '11.22.33.44'
nw_src = '55.66.77.88'
i = ipv4(total_length=total_length, src=nw_src, dst=nw_dst,
proto=nw_proto)
p = Packet()
p.add_protocol(e)
p.add_protocol(i)
p.add_protocol(self.g)
p.serialize()
return p
def test_build_igmp(self):
p = self._build_igmp()
e = self.find_protocol(p, "ethernet")
ok_(e)
eq_(e.ethertype, ether.ETH_TYPE_IP)
i = self.find_protocol(p, "ipv4")
ok_(i)
eq_(i.proto, inet.IPPROTO_IGMP)
g = self.find_protocol(p, "igmp")
ok_(g)
eq_(g.msgtype, self.msgtype)
eq_(g.maxresp, self.maxresp)
eq_(g.csum, checksum(self.buf))
eq_(g.address, self.address)
def test_to_string(self):
igmp_values = {'msgtype': repr(self.msgtype),
'maxresp': repr(self.maxresp),
'csum': repr(self.csum),
'address': repr(self.address)}
_g_str = ','.join(['%s=%s' % (k, igmp_values[k])
for k, v in inspect.getmembers(self.g)
if k in igmp_values])
g_str = '%s(%s)' % (igmp.__name__, _g_str)
eq_(str(self.g), g_str)
eq_(repr(self.g), g_str)
@raises(Exception)
def test_malformed_igmp(self):
m_short_buf = self.buf[1:igmp._MIN_LEN]
igmp.parser(m_short_buf)
def test_default_args(self):
ig = igmp()
buf = ig.serialize(bytearray(), None)
res = unpack_from(igmp._PACK_STR, six.binary_type(buf))
eq_(res[0], 0x11)
eq_(res[1], 0)
eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
def test_json(self):
jsondict = self.g.to_jsondict()
g = igmp.from_jsondict(jsondict['igmp'])
eq_(str(self.g), str(g))
class Test_igmpv3_query(unittest.TestCase):
""" Test case for Internet Group Management Protocol v3
Membership Query Message"""
def setUp(self):
self.msgtype = IGMP_TYPE_QUERY
self.maxresp = 100
self.csum = 0
self.address = '225.0.0.1'
self.s_flg = 0
self.qrv = 2
self.qqic = 10
self.num = 0
self.srcs = []
self.s_qrv = self.s_flg << 3 | self.qrv
self.buf = pack(igmpv3_query._PACK_STR, self.msgtype,
self.maxresp, self.csum,
addrconv.ipv4.text_to_bin(self.address),
self.s_qrv, self.qqic, self.num)
self.g = igmpv3_query(
self.msgtype, self.maxresp, self.csum, self.address,
self.s_flg, self.qrv, self.qqic, self.num, self.srcs)
def setUp_with_srcs(self):
self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
self.num = len(self.srcs)
self.buf = pack(igmpv3_query._PACK_STR, self.msgtype,
self.maxresp, self.csum,
addrconv.ipv4.text_to_bin(self.address),
self.s_qrv, self.qqic, self.num)
for src in self.srcs:
self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
self.g = igmpv3_query(
self.msgtype, self.maxresp, self.csum, self.address,
self.s_flg, self.qrv, self.qqic, self.num, self.srcs)
def tearDown(self):
pass
def find_protocol(self, pkt, name):
for p in pkt.protocols:
if p.protocol_name == name:
return p
def test_init(self):
eq_(self.msgtype, self.g.msgtype)
eq_(self.maxresp, self.g.maxresp)
eq_(self.csum, self.g.csum)
eq_(self.address, self.g.address)
eq_(self.s_flg, self.g.s_flg)
eq_(self.qrv, self.g.qrv)
eq_(self.qqic, self.g.qqic)
eq_(self.num, self.g.num)
eq_(self.srcs, self.g.srcs)
def test_init_with_srcs(self):
self.setUp_with_srcs()
self.test_init()
def test_parser(self):
_res = self.g.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(res.msgtype, self.msgtype)
eq_(res.maxresp, self.maxresp)
eq_(res.csum, self.csum)
eq_(res.address, self.address)
eq_(res.s_flg, self.s_flg)
eq_(res.qrv, self.qrv)
eq_(res.qqic, self.qqic)
eq_(res.num, self.num)
eq_(res.srcs, self.srcs)
def test_parser_with_srcs(self):
self.setUp_with_srcs()
self.test_parser()
def test_serialize(self):
data = bytearray()
prev = None
buf = self.g.serialize(data, prev)
res = unpack_from(igmpv3_query._PACK_STR, six.binary_type(buf))
eq_(res[0], self.msgtype)
eq_(res[1], self.maxresp)
eq_(res[2], checksum(self.buf))
eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
eq_(res[4], self.s_qrv)
eq_(res[5], self.qqic)
eq_(res[6], self.num)
def test_serialize_with_srcs(self):
self.setUp_with_srcs()
data = bytearray()
prev = None
buf = self.g.serialize(data, prev)
res = unpack_from(igmpv3_query._PACK_STR, six.binary_type(buf))
(src1, src2, src3) = unpack_from('4s4s4s', six.binary_type(buf),
igmpv3_query._MIN_LEN)
eq_(res[0], self.msgtype)
eq_(res[1], self.maxresp)
eq_(res[2], checksum(self.buf))
eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
eq_(res[4], self.s_qrv)
eq_(res[5], self.qqic)
eq_(res[6], self.num)
eq_(src1, addrconv.ipv4.text_to_bin(self.srcs[0]))
eq_(src2, addrconv.ipv4.text_to_bin(self.srcs[1]))
eq_(src3, addrconv.ipv4.text_to_bin(self.srcs[2]))
def _build_igmp(self):
dl_dst = '11:22:33:44:55:66'
dl_src = 'aa:bb:cc:dd:ee:ff'
dl_type = ether.ETH_TYPE_IP
e = ethernet(dl_dst, dl_src, dl_type)
total_length = len(ipv4()) + len(self.g)
nw_proto = inet.IPPROTO_IGMP
nw_dst = '11.22.33.44'
nw_src = '55.66.77.88'
i = ipv4(total_length=total_length, src=nw_src, dst=nw_dst,
proto=nw_proto, ttl=1)
p = Packet()
p.add_protocol(e)
p.add_protocol(i)
p.add_protocol(self.g)
p.serialize()
return p
def test_build_igmp(self):
p = self._build_igmp()
e = self.find_protocol(p, "ethernet")
ok_(e)
eq_(e.ethertype, ether.ETH_TYPE_IP)
i = self.find_protocol(p, "ipv4")
ok_(i)
eq_(i.proto, inet.IPPROTO_IGMP)
g = self.find_protocol(p, "igmpv3_query")
ok_(g)
eq_(g.msgtype, self.msgtype)
eq_(g.maxresp, self.maxresp)
eq_(g.csum, checksum(self.buf))
eq_(g.address, self.address)
eq_(g.s_flg, self.s_flg)
eq_(g.qrv, self.qrv)
eq_(g.qqic, self.qqic)
eq_(g.num, self.num)
eq_(g.srcs, self.srcs)
def test_build_igmp_with_srcs(self):
self.setUp_with_srcs()
self.test_build_igmp()
def test_to_string(self):
igmp_values = {'msgtype': repr(self.msgtype),
'maxresp': repr(self.maxresp),
'csum': repr(self.csum),
'address': repr(self.address),
's_flg': repr(self.s_flg),
'qrv': repr(self.qrv),
'qqic': repr(self.qqic),
'num': repr(self.num),
'srcs': repr(self.srcs)}
_g_str = ','.join(['%s=%s' % (k, igmp_values[k])
for k, v in inspect.getmembers(self.g)
if k in igmp_values])
g_str = '%s(%s)' % (igmpv3_query.__name__, _g_str)
eq_(str(self.g), g_str)
eq_(repr(self.g), g_str)
def test_to_string_with_srcs(self):
self.setUp_with_srcs()
self.test_to_string()
@raises(Exception)
def test_num_larger_than_srcs(self):
self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
self.num = len(self.srcs) + 1
self.buf = pack(igmpv3_query._PACK_STR, self.msgtype,
self.maxresp, self.csum,
addrconv.ipv4.text_to_bin(self.address),
self.s_qrv, self.qqic, self.num)
for src in self.srcs:
self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
self.g = igmpv3_query(
self.msgtype, self.maxresp, self.csum, self.address,
self.s_flg, self.qrv, self.qqic, self.num, self.srcs)
self.test_parser()
@raises(Exception)
def test_num_smaller_than_srcs(self):
self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
self.num = len(self.srcs) - 1
self.buf = pack(igmpv3_query._PACK_STR, self.msgtype,
self.maxresp, self.csum,
addrconv.ipv4.text_to_bin(self.address),
self.s_qrv, self.qqic, self.num)
for src in self.srcs:
self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
self.g = igmpv3_query(
self.msgtype, self.maxresp, self.csum, self.address,
self.s_flg, self.qrv, self.qqic, self.num, self.srcs)
self.test_parser()
def test_default_args(self):
prev = ipv4(proto=inet.IPPROTO_IGMP)
g = igmpv3_query()
prev.serialize(g, None)
buf = g.serialize(bytearray(), prev)
res = unpack_from(igmpv3_query._PACK_STR, six.binary_type(buf))
buf = bytearray(buf)
pack_into('!H', buf, 2, 0)
eq_(res[0], IGMP_TYPE_QUERY)
eq_(res[1], 100)
eq_(res[2], checksum(buf))
eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
eq_(res[4], 2)
eq_(res[5], 0)
eq_(res[6], 0)
# srcs without num
prev = ipv4(proto=inet.IPPROTO_IGMP)
srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
g = igmpv3_query(srcs=srcs)
prev.serialize(g, None)
buf = g.serialize(bytearray(), prev)
res = unpack_from(igmpv3_query._PACK_STR, six.binary_type(buf))
buf = bytearray(buf)
pack_into('!H', buf, 2, 0)
eq_(res[0], IGMP_TYPE_QUERY)
eq_(res[1], 100)
eq_(res[2], checksum(buf))
eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
eq_(res[4], 2)
eq_(res[5], 0)
eq_(res[6], len(srcs))
res = unpack_from('4s4s4s', six.binary_type(buf), igmpv3_query._MIN_LEN)
eq_(res[0], addrconv.ipv4.text_to_bin(srcs[0]))
eq_(res[1], addrconv.ipv4.text_to_bin(srcs[1]))
eq_(res[2], addrconv.ipv4.text_to_bin(srcs[2]))
def test_json(self):
jsondict = self.g.to_jsondict()
g = igmpv3_query.from_jsondict(jsondict['igmpv3_query'])
eq_(str(self.g), str(g))
def test_json_with_srcs(self):
self.setUp_with_srcs()
self.test_json()
class Test_igmpv3_report(unittest.TestCase):
""" Test case for Internet Group Management Protocol v3
Membership Report Message"""
def setUp(self):
self.msgtype = IGMP_TYPE_REPORT_V3
self.csum = 0
self.record_num = 0
self.records = []
self.buf = pack(igmpv3_report._PACK_STR, self.msgtype,
self.csum, self.record_num)
self.g = igmpv3_report(
self.msgtype, self.csum, self.record_num, self.records)
def setUp_with_records(self):
self.record1 = igmpv3_report_group(
MODE_IS_INCLUDE, 0, 0, '225.0.0.1')
self.record2 = igmpv3_report_group(
MODE_IS_INCLUDE, 0, 2, '225.0.0.2',
['172.16.10.10', '172.16.10.27'])
self.record3 = igmpv3_report_group(
MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], b'abc\x00')
self.record4 = igmpv3_report_group(
MODE_IS_INCLUDE, 2, 2, '225.0.0.4',
['172.16.10.10', '172.16.10.27'], b'abcde\x00\x00\x00')
self.records = [self.record1, self.record2, self.record3,
self.record4]
self.record_num = len(self.records)
self.buf = pack(igmpv3_report._PACK_STR, self.msgtype,
self.csum, self.record_num)
self.buf += self.record1.serialize()
self.buf += self.record2.serialize()
self.buf += self.record3.serialize()
self.buf += self.record4.serialize()
self.g = igmpv3_report(
self.msgtype, self.csum, self.record_num, self.records)
def tearDown(self):
pass
def find_protocol(self, pkt, name):
for p in pkt.protocols:
if p.protocol_name == name:
return p
def test_init(self):
eq_(self.msgtype, self.g.msgtype)
eq_(self.csum, self.g.csum)
eq_(self.record_num, self.g.record_num)
eq_(self.records, self.g.records)
def test_init_with_records(self):
self.setUp_with_records()
self.test_init()
def test_parser(self):
_res = self.g.parser(six.binary_type(self.buf))
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(res.msgtype, self.msgtype)
eq_(res.csum, self.csum)
eq_(res.record_num, self.record_num)
eq_(repr(res.records), repr(self.records))
def test_parser_with_records(self):
self.setUp_with_records()
self.test_parser()
def test_serialize(self):
data = bytearray()
prev = None
buf = self.g.serialize(data, prev)
res = unpack_from(igmpv3_report._PACK_STR, six.binary_type(buf))
eq_(res[0], self.msgtype)
eq_(res[1], checksum(self.buf))
eq_(res[2], self.record_num)
def test_serialize_with_records(self):
self.setUp_with_records()
data = bytearray()
prev = None
buf = six.binary_type(self.g.serialize(data, prev))
res = unpack_from(igmpv3_report._PACK_STR, buf)
offset = igmpv3_report._MIN_LEN
rec1 = igmpv3_report_group.parser(buf[offset:])
offset += len(rec1)
rec2 = igmpv3_report_group.parser(buf[offset:])
offset += len(rec2)
rec3 = igmpv3_report_group.parser(buf[offset:])
offset += len(rec3)
rec4 = igmpv3_report_group.parser(buf[offset:])
eq_(res[0], self.msgtype)
eq_(res[1], checksum(self.buf))
eq_(res[2], self.record_num)
eq_(repr(rec1), repr(self.record1))
eq_(repr(rec2), repr(self.record2))
eq_(repr(rec3), repr(self.record3))
eq_(repr(rec4), repr(self.record4))
def _build_igmp(self):
dl_dst = '11:22:33:44:55:66'
dl_src = 'aa:bb:cc:dd:ee:ff'
dl_type = ether.ETH_TYPE_IP
e = ethernet(dl_dst, dl_src, dl_type)
total_length = len(ipv4()) + len(self.g)
nw_proto = inet.IPPROTO_IGMP
nw_dst = '11.22.33.44'
nw_src = '55.66.77.88'
i = ipv4(total_length=total_length, src=nw_src, dst=nw_dst,
proto=nw_proto, ttl=1)
p = Packet()
p.add_protocol(e)
p.add_protocol(i)
p.add_protocol(self.g)
p.serialize()
return p
def test_build_igmp(self):
p = self._build_igmp()
e = self.find_protocol(p, "ethernet")
ok_(e)
eq_(e.ethertype, ether.ETH_TYPE_IP)
i = self.find_protocol(p, "ipv4")
ok_(i)
eq_(i.proto, inet.IPPROTO_IGMP)
g = self.find_protocol(p, "igmpv3_report")
ok_(g)
eq_(g.msgtype, self.msgtype)
eq_(g.csum, checksum(self.buf))
eq_(g.record_num, self.record_num)
eq_(g.records, self.records)
def test_build_igmp_with_records(self):
self.setUp_with_records()
self.test_build_igmp()
def test_to_string(self):
igmp_values = {'msgtype': repr(self.msgtype),
'csum': repr(self.csum),
'record_num': repr(self.record_num),
'records': repr(self.records)}
_g_str = ','.join(['%s=%s' % (k, igmp_values[k])
for k, v in inspect.getmembers(self.g)
if k in igmp_values])
g_str = '%s(%s)' % (igmpv3_report.__name__, _g_str)
eq_(str(self.g), g_str)
eq_(repr(self.g), g_str)
def test_to_string_with_records(self):
self.setUp_with_records()
self.test_to_string()
@raises(Exception)
def test_record_num_larger_than_records(self):
self.record1 = igmpv3_report_group(
MODE_IS_INCLUDE, 0, 0, '225.0.0.1')
self.record2 = igmpv3_report_group(
MODE_IS_INCLUDE, 0, 2, '225.0.0.2',
['172.16.10.10', '172.16.10.27'])
self.record3 = igmpv3_report_group(
MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], b'abc\x00')
self.record4 = igmpv3_report_group(
MODE_IS_INCLUDE, 1, 2, '225.0.0.4',
['172.16.10.10', '172.16.10.27'], b'abc\x00')
self.records = [self.record1, self.record2, self.record3,
self.record4]
self.record_num = len(self.records) + 1
self.buf = pack(igmpv3_report._PACK_STR, self.msgtype,
self.csum, self.record_num)
self.buf += self.record1.serialize()
self.buf += self.record2.serialize()
self.buf += self.record3.serialize()
self.buf += self.record4.serialize()
self.g = igmpv3_report(
self.msgtype, self.csum, self.record_num, self.records)
self.test_parser()
@raises(Exception)
def test_record_num_smaller_than_records(self):
self.record1 = igmpv3_report_group(
MODE_IS_INCLUDE, 0, 0, '225.0.0.1')
self.record2 = igmpv3_report_group(
MODE_IS_INCLUDE, 0, 2, '225.0.0.2',
['172.16.10.10', '172.16.10.27'])
self.record3 = igmpv3_report_group(
MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], b'abc\x00')
self.record4 = igmpv3_report_group(
MODE_IS_INCLUDE, 1, 2, '225.0.0.4',
['172.16.10.10', '172.16.10.27'], b'abc\x00')
self.records = [self.record1, self.record2, self.record3,
self.record4]
self.record_num = len(self.records) - 1
self.buf = pack(igmpv3_report._PACK_STR, self.msgtype,
self.csum, self.record_num)
self.buf += self.record1.serialize()
self.buf += self.record2.serialize()
self.buf += self.record3.serialize()
self.buf += self.record4.serialize()
self.g = igmpv3_report(
self.msgtype, self.csum, self.record_num, self.records)
self.test_parser()
def test_default_args(self):
prev = ipv4(proto=inet.IPPROTO_IGMP)
g = igmpv3_report()
prev.serialize(g, None)
buf = g.serialize(bytearray(), prev)
res = unpack_from(igmpv3_report._PACK_STR, six.binary_type(buf))
buf = bytearray(buf)
pack_into('!H', buf, 2, 0)
eq_(res[0], IGMP_TYPE_REPORT_V3)
eq_(res[1], checksum(buf))
eq_(res[2], 0)
# records without record_num
prev = ipv4(proto=inet.IPPROTO_IGMP)
record1 = igmpv3_report_group(
MODE_IS_INCLUDE, 0, 0, '225.0.0.1')
record2 = igmpv3_report_group(
MODE_IS_INCLUDE, 0, 2, '225.0.0.2',
['172.16.10.10', '172.16.10.27'])
record3 = igmpv3_report_group(
MODE_IS_INCLUDE, 1, 0, '225.0.0.3', [], b'abc\x00')
record4 = igmpv3_report_group(
MODE_IS_INCLUDE, 1, 2, '225.0.0.4',
['172.16.10.10', '172.16.10.27'], b'abc\x00')
records = [record1, record2, record3, record4]
g = igmpv3_report(records=records)
prev.serialize(g, None)
buf = g.serialize(bytearray(), prev)
res = unpack_from(igmpv3_report._PACK_STR, six.binary_type(buf))
buf = bytearray(buf)
pack_into('!H', buf, 2, 0)
eq_(res[0], IGMP_TYPE_REPORT_V3)
eq_(res[1], checksum(buf))
eq_(res[2], len(records))
def test_json(self):
jsondict = self.g.to_jsondict()
g = igmpv3_report.from_jsondict(jsondict['igmpv3_report'])
eq_(str(self.g), str(g))
def test_json_with_records(self):
self.setUp_with_records()
self.test_json()
class Test_igmpv3_report_group(unittest.TestCase):
"""Test case for Group Records of
Internet Group Management Protocol v3 Membership Report Message"""
def setUp(self):
self.type_ = MODE_IS_INCLUDE
self.aux_len = 0
self.num = 0
self.address = '225.0.0.1'
self.srcs = []
self.aux = None
self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
self.aux_len, self.num,
addrconv.ipv4.text_to_bin(self.address))
self.g = igmpv3_report_group(
self.type_, self.aux_len, self.num, self.address,
self.srcs, self.aux)
def setUp_with_srcs(self):
self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
self.num = len(self.srcs)
self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
self.aux_len, self.num,
addrconv.ipv4.text_to_bin(self.address))
for src in self.srcs:
self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
self.g = igmpv3_report_group(
self.type_, self.aux_len, self.num, self.address,
self.srcs, self.aux)
def setUp_with_aux(self):
self.aux = b'\x01\x02\x03\x04\x05\x00\x00\x00'
self.aux_len = len(self.aux) // 4
self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
self.aux_len, self.num,
addrconv.ipv4.text_to_bin(self.address))
self.buf += self.aux
self.g = igmpv3_report_group(
self.type_, self.aux_len, self.num, self.address,
self.srcs, self.aux)
def setUp_with_srcs_and_aux(self):
self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
self.num = len(self.srcs)
self.aux = b'\x01\x02\x03\x04\x05\x00\x00\x00'
self.aux_len = len(self.aux) // 4
self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
self.aux_len, self.num,
addrconv.ipv4.text_to_bin(self.address))
for src in self.srcs:
self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
self.buf += self.aux
self.g = igmpv3_report_group(
self.type_, self.aux_len, self.num, self.address,
self.srcs, self.aux)
def tearDown(self):
pass
def test_init(self):
eq_(self.type_, self.g.type_)
eq_(self.aux_len, self.g.aux_len)
eq_(self.num, self.g.num)
eq_(self.address, self.g.address)
eq_(self.srcs, self.g.srcs)
eq_(self.aux, self.g.aux)
def test_init_with_srcs(self):
self.setUp_with_srcs()
self.test_init()
def test_init_with_aux(self):
self.setUp_with_aux()
self.test_init()
def test_init_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
self.test_init()
def test_parser(self):
_res = self.g.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(res.type_, self.type_)
eq_(res.aux_len, self.aux_len)
eq_(res.num, self.num)
eq_(res.address, self.address)
eq_(res.srcs, self.srcs)
eq_(res.aux, self.aux)
def test_parser_with_srcs(self):
self.setUp_with_srcs()
self.test_parser()
def test_parser_with_aux(self):
self.setUp_with_aux()
self.test_parser()
def test_parser_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
self.test_parser()
def test_serialize(self):
buf = self.g.serialize()
res = unpack_from(igmpv3_report_group._PACK_STR, six.binary_type(buf))
eq_(res[0], self.type_)
eq_(res[1], self.aux_len)
eq_(res[2], self.num)
eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
def test_serialize_with_srcs(self):
self.setUp_with_srcs()
buf = self.g.serialize()
res = unpack_from(igmpv3_report_group._PACK_STR, six.binary_type(buf))
(src1, src2, src3) = unpack_from('4s4s4s', six.binary_type(buf),
igmpv3_report_group._MIN_LEN)
eq_(res[0], self.type_)
eq_(res[1], self.aux_len)
eq_(res[2], self.num)
eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
eq_(src1, addrconv.ipv4.text_to_bin(self.srcs[0]))
eq_(src2, addrconv.ipv4.text_to_bin(self.srcs[1]))
eq_(src3, addrconv.ipv4.text_to_bin(self.srcs[2]))
def test_serialize_with_aux(self):
self.setUp_with_aux()
buf = self.g.serialize()
res = unpack_from(igmpv3_report_group._PACK_STR, six.binary_type(buf))
(aux, ) = unpack_from('%ds' % (self.aux_len * 4), six.binary_type(buf),
igmpv3_report_group._MIN_LEN)
eq_(res[0], self.type_)
eq_(res[1], self.aux_len)
eq_(res[2], self.num)
eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
eq_(aux, self.aux)
def test_serialize_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
buf = self.g.serialize()
res = unpack_from(igmpv3_report_group._PACK_STR, six.binary_type(buf))
(src1, src2, src3) = unpack_from('4s4s4s', six.binary_type(buf),
igmpv3_report_group._MIN_LEN)
(aux, ) = unpack_from('%ds' % (self.aux_len * 4), six.binary_type(buf),
igmpv3_report_group._MIN_LEN + 12)
eq_(res[0], self.type_)
eq_(res[1], self.aux_len)
eq_(res[2], self.num)
eq_(res[3], addrconv.ipv4.text_to_bin(self.address))
eq_(src1, addrconv.ipv4.text_to_bin(self.srcs[0]))
eq_(src2, addrconv.ipv4.text_to_bin(self.srcs[1]))
eq_(src3, addrconv.ipv4.text_to_bin(self.srcs[2]))
eq_(aux, self.aux)
def test_to_string(self):
igmp_values = {'type_': repr(self.type_),
'aux_len': repr(self.aux_len),
'num': repr(self.num),
'address': repr(self.address),
'srcs': repr(self.srcs),
'aux': repr(self.aux)}
_g_str = ','.join(['%s=%s' % (k, igmp_values[k])
for k, v in inspect.getmembers(self.g)
if k in igmp_values])
g_str = '%s(%s)' % (igmpv3_report_group.__name__, _g_str)
eq_(str(self.g), g_str)
eq_(repr(self.g), g_str)
def test_to_string_with_srcs(self):
self.setUp_with_srcs()
self.test_to_string()
def test_to_string_with_aux(self):
self.setUp_with_aux()
self.test_to_string()
def test_to_string_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
self.test_to_string()
def test_len(self):
eq_(len(self.g), 8)
def test_len_with_srcs(self):
self.setUp_with_srcs()
eq_(len(self.g), 20)
def test_len_with_aux(self):
self.setUp_with_aux()
eq_(len(self.g), 16)
def test_len_with_srcs_and_aux(self):
self.setUp_with_srcs_and_aux()
eq_(len(self.g), 28)
    @raises
    def test_num_larger_than_srcs(self):
        """Construction/parsing must fail when the advertised source count
        exceeds the actual source list.

        NOTE(review): nose's ``raises`` is normally used as a decorator
        factory, ``@raises(SomeException)``; the bare ``@raises`` here (and
        on the three tests below) looks suspicious -- confirm against the
        nose API.
        """
        self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        self.num = len(self.srcs) + 1
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        for src in self.srcs:
            self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
        self.test_parser()

    @raises
    def test_num_smaller_than_srcs(self):
        """Must fail when the advertised source count is less than the
        actual source list."""
        self.srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        self.num = len(self.srcs) - 1
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        for src in self.srcs:
            self.buf += pack('4s', addrconv.ipv4.text_to_bin(src))
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
        self.test_parser()

    @raises
    def test_aux_len_larger_than_aux(self):
        """Must fail when aux_len (in 32-bit words) overstates the actual
        auxiliary data length."""
        self.aux = b'\x01\x02\x03\x04\x05\x00\x00\x00'
        self.aux_len = len(self.aux) // 4 + 1
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        self.buf += self.aux
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
        self.test_parser()

    @raises
    def test_aux_len_smaller_than_aux(self):
        """Must fail when aux_len understates the actual auxiliary data
        length."""
        self.aux = b'\x01\x02\x03\x04\x05\x00\x00\x00'
        self.aux_len = len(self.aux) // 4 - 1
        self.buf = pack(igmpv3_report_group._PACK_STR, self.type_,
                        self.aux_len, self.num,
                        addrconv.ipv4.text_to_bin(self.address))
        self.buf += self.aux
        self.g = igmpv3_report_group(
            self.type_, self.aux_len, self.num, self.address,
            self.srcs, self.aux)
        self.test_parser()
    def test_default_args(self):
        """serialize() fills sensible defaults: all-zero fields for a bare
        record, and num/aux_len derived from srcs/aux when omitted."""
        rep = igmpv3_report_group()
        buf = rep.serialize()
        res = unpack_from(igmpv3_report_group._PACK_STR, six.binary_type(buf))
        eq_(res[0], 0)
        eq_(res[1], 0)
        eq_(res[2], 0)
        eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))

        # srcs given without num: num must be computed as len(srcs)
        srcs = ['192.168.1.1', '192.168.1.2', '192.168.1.3']
        rep = igmpv3_report_group(srcs=srcs)
        buf = rep.serialize()
        res = unpack_from(igmpv3_report_group._PACK_STR, six.binary_type(buf))
        eq_(res[0], 0)
        eq_(res[1], 0)
        eq_(res[2], len(srcs))
        eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
        # the three source addresses follow the fixed header in order
        res = unpack_from('4s4s4s', six.binary_type(buf),
                          igmpv3_report_group._MIN_LEN)
        eq_(res[0], addrconv.ipv4.text_to_bin(srcs[0]))
        eq_(res[1], addrconv.ipv4.text_to_bin(srcs[1]))
        eq_(res[2], addrconv.ipv4.text_to_bin(srcs[2]))

        # aux given without aux_len: aux is zero-padded to a 32-bit
        # boundary and aux_len counts 32-bit words (5 bytes -> 8 -> 2)
        aux = b'abcde'
        rep = igmpv3_report_group(aux=aux)
        buf = rep.serialize()
        res = unpack_from(igmpv3_report_group._PACK_STR, six.binary_type(buf))
        eq_(res[0], 0)
        eq_(res[1], 2)
        eq_(res[2], 0)
        eq_(res[3], addrconv.ipv4.text_to_bin('0.0.0.0'))
        eq_(buf[igmpv3_report_group._MIN_LEN:], b'abcde\x00\x00\x00')
|
boooka/GeoPowerOff | refs/heads/master | venv/lib/python2.7/site-packages/django/contrib/sitemaps/tests/__init__.py | 12133432 | |
flupzor/newsdiffs | refs/heads/master | news/migrations/0005_auto_20160312_1641.py | 2 | # -*- coding: utf-8 -*-
from django.db import models, migrations
import news.models
class Migration(migrations.Migration):
    """Add the RequestLog table and redefine two Article timestamp fields.

    RequestLog records one row per scraper HTTP request (when, which
    source, which URL, and the server address contacted).  The AlterField
    operations redeclare Article.last_check/last_update as plain
    DateTimeFields (presumably dropping auto_now-style options from an
    earlier state -- see migration 0004 to confirm).
    """

    dependencies = [
        ('news', '0004_auto_20160301_2235'),
    ]

    operations = [
        migrations.CreateModel(
            name='RequestLog',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateTimeField()),
                # indexed for the common "requests by source/url/server" queries
                ('source', models.CharField(max_length=255, db_index=True)),
                ('url', models.CharField(max_length=255, db_index=True)),
                ('server_address', models.CharField(max_length=255, db_index=True)),
            ],
        ),
        migrations.AlterField(
            model_name='article',
            name='last_check',
            field=models.DateTimeField(),
        ),
        migrations.AlterField(
            model_name='article',
            name='last_update',
            field=models.DateTimeField(),
        ),
    ]
|
indevgr/django | refs/heads/master | tests/template_tests/test_unicode.py | 347 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
from django.template import Context, Engine
from django.template.base import TemplateEncodingError
from django.utils import six
from django.utils.safestring import SafeData
class UnicodeTests(TestCase):

    def test_template(self):
        """Template sources and context data may be unicode or UTF-8 bytes,
        and both forms must render to identical safe unicode output."""
        engine = Engine()

        # A template built from a unicode source string.
        unicode_template = engine.from_string('ŠĐĆŽćžšđ {{ var }}')

        # The same source supplied as a UTF-8 encoded bytestring.
        utf8_source = b'\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91 {{ var }}'
        bytes_template = engine.from_string(utf8_source)

        # Bytes that are not valid UTF-8 must be rejected.
        with self.assertRaises(TemplateEncodingError):
            engine.from_string(b'\x80\xc5\xc0')

        # Contexts accept any mix of unicode and UTF-8 bytestrings; merely
        # constructing these must not raise.
        Context({b"var": b"foo"})
        Context({"var": b"foo"})
        mixed_context = Context({b"var": "Đđ"})
        Context({"var": b"\xc4\x90\xc4\x91"})

        # Both templates represent the same thing, so they render the same
        # -- as unicode, marked safe for auto-escaping purposes.
        self.assertEqual(unicode_template.render(mixed_context),
                         bytes_template.render(mixed_context))
        self.assertIsInstance(unicode_template.render(mixed_context),
                              six.text_type)
        self.assertIsInstance(unicode_template.render(mixed_context),
                              SafeData)
|
benvermaercke/pyqtgraph | refs/heads/develop | examples/ROItypes.py | 24 | #!/usr/bin/python -i
# -*- coding: utf-8 -*-
## Add path to library (just for examples; you do not need this)
import initExample

from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg

## create GUI
app = QtGui.QApplication([])
w = pg.GraphicsWindow(size=(800,800), border=True)
v = w.addViewBox(colspan=2)
#w = QtGui.QMainWindow()
#w.resize(800,800)
#v = pg.GraphicsView()
v.invertY(True) ## Images usually have their Y-axis pointing downward
v.setAspectLocked(True)
#v.enableMouse(True)
#v.autoPixelScale = False
#w.setCentralWidget(v)
#s = v.scene()
#v.setRange(QtCore.QRectF(-2, -2, 220, 220))

## Create image to display: a 100x100 field with a dark square in the
## middle and a grid of brighter crosshair lines for visual reference.
arr = np.ones((100, 100), dtype=float)
arr[45:55, 45:55] = 0
arr[25, :] = 5
arr[:, 25] = 5
arr[75, :] = 5
arr[:, 75] = 5
arr[50, :] = 10
arr[:, 50] = 10

## Create image items, add to scene and set position
## (im1/im2 show the same array side by side)
im1 = pg.ImageItem(arr)
im2 = pg.ImageItem(arr)
v.addItem(im1)
v.addItem(im2)
im2.moveBy(110, 20)
v.setRange(QtCore.QRectF(0, 0, 200, 120))

## im3/im4 are empty preview items; updateRoi() fills them with the
## region extracted from im1/im2 under the active ROI.
im3 = pg.ImageItem()
v2 = w.addViewBox(1,0)
v2.addItem(im3)
v2.setRange(QtCore.QRectF(0, 0, 60, 60))
v2.invertY(True)
v2.setAspectLocked(True)
#im3.moveBy(0, 130)
im3.setZValue(10)

im4 = pg.ImageItem()
v3 = w.addViewBox(1,1)
v3.addItem(im4)
v3.setRange(QtCore.QRectF(0, 0, 60, 60))
v3.invertY(True)
v3.setAspectLocked(True)
#im4.moveBy(110, 130)
im4.setZValue(10)

## create the plot showing per-ROI row means
pi1 = w.addPlot(2,0, colspan=2)
#pi1 = pg.PlotItem()
#s.addItem(pi1)
#pi1.scale(0.5, 0.5)
#pi1.setGeometry(0, 170, 300, 100)

# most recently moved ROI; re-applied by updateImage()
lastRoi = None
def updateRoi(roi):
    """Re-extract the image regions under *roi* and refresh both preview
    images and the mean-value plot curve."""
    global im1, im2, im3, im4, arr, lastRoi
    if roi is None:
        return
    # remember the most recently moved ROI so updateImage() can re-apply it
    lastRoi = roi
    arr1 = roi.getArrayRegion(im1.image, img=im1)
    im3.setImage(arr1)
    arr2 = roi.getArrayRegion(im2.image, img=im2)
    im4.setImage(arr2)
    updateRoiPlot(roi, arr1)
def updateRoiPlot(roi, data=None):
    """Plot the column-wise mean of *data* on the ROI's curve; if *data*
    is not given, extract it from im1 under the ROI first."""
    if data is None:
        data = roi.getArrayRegion(im1.image, img=im1)
    if data is not None:
        roi.curve.setData(data.mean(axis=1))
## Create a variety of different ROI types
rois = []
rois.append(pg.TestROI([0, 0], [20, 20], maxBounds=QtCore.QRectF(-10, -10, 230, 140), pen=(0,9)))
rois.append(pg.LineROI([0, 0], [20, 20], width=5, pen=(1,9)))
rois.append(pg.MultiLineROI([[0, 50], [50, 60], [60, 30]], width=5, pen=(2,9)))
rois.append(pg.EllipseROI([110, 10], [30, 20], pen=(3,9)))
rois.append(pg.CircleROI([110, 50], [20, 20], pen=(4,9)))
rois.append(pg.PolygonROI([[2,0], [2.1,0], [2,.1]], pen=(5,9)))
#rois.append(SpiralROI([20,30], [1,1], pen=mkPen(0)))

## Add each ROI to the scene and link its data to a plot curve with the same color
for r in rois:
    v.addItem(r)
    c = pi1.plot(pen=r.pen)
    r.curve = c
    # any drag/resize of the ROI refreshes the previews and the plot
    r.sigRegionChanged.connect(updateRoi)
def updateImage():
    """Overlay fresh random noise on the left image and re-apply the last
    active ROI so the previews track the changing data."""
    global im1, arr, lastRoi
    noise_scale = (arr.max() - arr.min()) * 0.1
    noise = np.abs(np.random.normal(loc=0, scale=noise_scale, size=arr.shape))
    im1.updateImage(arr + noise)
    updateRoi(lastRoi)
# draw each ROI's initial curve before the timer starts
for r in rois:
    updateRoiPlot(r)

## Rapidly update one of the images with random noise
t = QtCore.QTimer()
t.timeout.connect(updateImage)
t.start(50)

## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
lexor90/node-compiler | refs/heads/master | node/deps/v8/tools/run-valgrind.py | 23 | #!/usr/bin/env python
#
# Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Simple wrapper for running valgrind and checking the output on
# stderr for memory leaks.
# Uses valgrind from third_party/valgrind. Assumes the executable is passed
# with a path relative to the v8 root.
from os import path
import platform
import re
import subprocess
import sys
V8_ROOT = path.dirname(path.dirname(path.abspath(__file__)))
MACHINE = 'linux_x64' if platform.machine() == 'x86_64' else 'linux_x86'
VALGRIND_ROOT = path.join(V8_ROOT, 'third_party', 'valgrind', MACHINE)
VALGRIND_BIN = path.join(VALGRIND_ROOT, 'bin', 'valgrind')
VALGRIND_LIB = path.join(VALGRIND_ROOT, 'lib', 'valgrind')
VALGRIND_ARGUMENTS = [
VALGRIND_BIN,
'--error-exitcode=1',
'--leak-check=full',
'--smc-check=all',
]
if len(sys.argv) < 2:
print 'Please provide an executable to analyze.'
sys.exit(1)
executable = path.join(V8_ROOT, sys.argv[1])
if not path.exists(executable):
print 'Cannot find the file specified: %s' % executable
sys.exit(1)
# Compute the command line.
command = VALGRIND_ARGUMENTS + [executable] + sys.argv[2:]
# Run valgrind.
process = subprocess.Popen(
command,
stderr=subprocess.PIPE,
env={'VALGRIND_LIB': VALGRIND_LIB}
)
code = process.wait();
errors = process.stderr.readlines();
# If valgrind produced an error, we report that to the user.
if code != 0:
sys.stderr.writelines(errors)
sys.exit(code)
# Look through the leak details and make sure that we don't
# have any definitely, indirectly, and possibly lost bytes.
LEAK_RE = r"(?:definitely|indirectly|possibly) lost: "
LEAK_LINE_MATCHER = re.compile(LEAK_RE)
LEAK_OKAY_MATCHER = re.compile(r"lost: 0 bytes in 0 blocks")
leaks = []
for line in errors:
if LEAK_LINE_MATCHER.search(line):
leaks.append(line)
if not LEAK_OKAY_MATCHER.search(line):
sys.stderr.writelines(errors)
sys.exit(1)
# Make sure we found between 2 and 3 leak lines.
if len(leaks) < 2 or len(leaks) > 3:
sys.stderr.writelines(errors)
sys.stderr.write('\n\n#### Malformed valgrind output.\n#### Exiting.\n')
sys.exit(1)
# No leaks found.
sys.stderr.writelines(errors)
sys.exit(0)
|
sitsbeyou/Django-facebook | refs/heads/master | docs/docs_env/Lib/encodings/mac_iceland.py | 593 | """ Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless mac-iceland codec backed by the charmap tables below."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)

class IncrementalEncoder(codecs.IncrementalEncoder):
    # charmap encoding is stateless, so incremental == one-shot
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass

### encodings module API

def getregentry():
    """Return the CodecInfo record the `encodings` package registry expects."""
    return codecs.CodecInfo(
        name='mac-iceland',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0xDD -> LATIN SMALL LETTER ETH
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xfe' # 0xDF -> LATIN SMALL LETTER THORN
u'\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
VTREEM/IfcOpenShell | refs/heads/master | src/ifcblender/io_import_scene_ifc/__init__.py | 4 | ###############################################################################
# #
# This file is part of IfcOpenShell. #
# #
# IfcOpenShell is free software: you can redistribute it and/or modify #
# it under the terms of the Lesser GNU General Public License as published by #
# the Free Software Foundation, either version 3.0 of the License, or #
# (at your option) any later version. #
# #
# IfcOpenShell is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# Lesser GNU General Public License for more details. #
# #
# You should have received a copy of the Lesser GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
# <pep8 compliant>
###############################################################################
# #
# Based on the Wavefront OBJ File importer by Campbell Barton #
# #
###############################################################################
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "IfcBlender",
    "description": "Import files in the "\
        "Industry Foundation Classes (.ifc) file format",
    "author": "Thomas Krijnen, IfcOpenShell",
    "blender": (2, 73, 0),
    "location": "File > Import",
    "tracker_url": "https://sourceforge.net/p/ifcopenshell/"\
        "_list/tickets?source=navbar",
    "category": "Import-Export"}
if "bpy" in locals():
import imp
if "ifcopenshell" in locals():
imp.reload(ifcopenshell)
import bpy
import mathutils
from bpy.props import StringProperty, IntProperty, BoolProperty
from bpy_extras.io_utils import ImportHelper
major,minor = bpy.app.version[0:2]
transpose_matrices = minor >= 62
bpy.types.Object.ifc_id = IntProperty(name="IFC Entity ID",
description="The STEP entity instance name")
bpy.types.Object.ifc_guid = StringProperty(name="IFC Entity GUID",
description="The IFC Globally Unique Identifier")
bpy.types.Object.ifc_name = StringProperty(name="IFC Entity Name",
description="The optional name attribute")
bpy.types.Object.ifc_type = StringProperty(name="IFC Entity Type",
description="The STEP Datatype keyword")
def import_ifc(filename, use_names, process_relations, blender_booleans):
    """Import *filename* (an .ifc file) into the current Blender scene.

    filename -- path of the .ifc file to read
    use_names -- name objects by IFC entity name (falling back to GUID)
    process_relations -- convert containment/aggregation to object parenting
    blender_booleans -- model IfcOpeningElements as Blender BOOLEAN modifiers
    Returns True on success, False if the file could not be parsed.
    """
    from . import ifcopenshell
    from .ifcopenshell import geom as ifcopenshell_geom
    print("Reading %s..."%bpy.path.basename(filename))
    settings = ifcopenshell_geom.settings()
    # with blender_booleans the openings are kept as separate geometry and
    # subtracted later via modifiers instead of by ifcopenshell itself
    settings.set(settings.DISABLE_OPENING_SUBTRACTIONS, blender_booleans)
    iterator = ifcopenshell_geom.iterator(settings, filename)
    valid_file = iterator.initialize()
    if not valid_file:
        return False
    print("Done reading file")
    id_to_object = {}   # entity id -> list of created Blender objects
    id_to_parent = {}   # entity id -> parent entity id
    id_to_matrix = {}   # entity id -> world matrix (when deferring placement)
    openings = []       # IfcOpeningElement ids for the boolean-modifier pass
    old_progress = -1
    print("Creating geometry...")
    # First pass: build one mesh object per product the iterator yields.
    while True:
        ob = iterator.get()
        f = ob.geometry.faces
        v = ob.geometry.verts
        mats = ob.geometry.materials
        matids = ob.geometry.material_ids
        m = ob.transformation.matrix.data
        t = ob.type[0:21]
        nm = ob.name if len(ob.name) and use_names else ob.guid
        # flat coordinate/index arrays -> [[x, y, z], ...] triples
        verts = [[v[i], v[i + 1], v[i + 2]] \
            for i in range(0, len(v), 3)]
        faces = [[f[i], f[i + 1], f[i + 2]] \
            for i in range(0, len(f), 3)]
        me = bpy.data.meshes.new('mesh%d' % ob.geometry.id)
        me.from_pydata(verts, [], faces)
        def add_material(mname, props):
            # reuse an existing material of the same name, else create one
            if mname in bpy.data.materials:
                mat = bpy.data.materials[mname]
                mat.use_fake_user = True
            else:
                mat = bpy.data.materials.new(mname)
            for k,v in props.items():
                setattr(mat, k, v)
            me.materials.append(mat)
        # material id -1 marks faces without an IFC material; give them a
        # default material named after the entity type
        needs_default = -1 in matids
        if needs_default: add_material(t, {})
        for mat in mats:
            props = {}
            if mat.has_diffuse: props['diffuse_color'] = mat.diffuse
            if mat.has_specular: props['specular_color'] = mat.specular
            if mat.has_transparency and mat.transparency > 0:
                props['alpha'] = 1.0 - mat.transparency
                props['use_transparency'] = True
            if mat.has_specularity: props['specular_hardness'] = mat.specularity
            add_material(mat.name, props)
        bob = bpy.data.objects.new(nm, me)
        mat = mathutils.Matrix(([m[0], m[1], m[2], 0],
                                [m[3], m[4], m[5], 0],
                                [m[6], m[7], m[8], 0],
                                [m[9], m[10], m[11], 1]))
        if transpose_matrices: mat.transpose()
        if process_relations:
            # defer placement until parenting is known (second pass below)
            id_to_matrix[ob.id] = mat
        else:
            bob.matrix_world = mat
        bpy.context.scene.objects.link(bob)
        bpy.context.scene.objects.active = bob
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.normals_make_consistent()
        bpy.ops.object.mode_set(mode='OBJECT')
        bob.ifc_id, bob.ifc_guid, bob.ifc_name, bob.ifc_type = \
            ob.id, ob.guid, ob.name, ob.type
        # spaces and openings are helper volumes: hide them and draw as wire
        if ob.type == 'IfcSpace' or ob.type == 'IfcOpeningElement':
            if not (ob.type == 'IfcOpeningElement' and blender_booleans):
                bob.hide = bob.hide_render = True
            bob.draw_type = 'WIRE'
        if ob.id not in id_to_object: id_to_object[ob.id] = []
        id_to_object[ob.id].append(bob)
        if ob.parent_id > 0:
            id_to_parent[ob.id] = ob.parent_id
        if blender_booleans and ob.type == 'IfcOpeningElement':
            openings.append(ob.id)
        # me.polygons replaced me.faces in newer Blender versions
        faces = me.polygons if hasattr(me, 'polygons') else me.faces
        if len(faces) == len(matids):
            for face, matid in zip(faces, matids):
                face.material_index = matid + (1 if needs_default else 0)
        progress = iterator.progress() // 2
        if progress > old_progress:
            print("\r[" + "#" * progress + " " * (50 - progress) + "]", end="")
            old_progress = progress
        if not iterator.next():
            break
    print("\rDone creating geometry" + " " * 30)
    # Second pass: create empties for parents without geometry and set up
    # the parent/child hierarchy.
    id_to_parent_temp = dict(id_to_parent)
    if process_relations:
        print("Processing relations...")
    while len(id_to_parent_temp) and process_relations:
        id, parent_id = id_to_parent_temp.popitem()
        if parent_id in id_to_object:
            bob = id_to_object[parent_id][0]
        else:
            parent_ob = iterator.getObject(parent_id)
            if parent_ob.id == -1:
                bob = None
            else:
                m = parent_ob.transformation.matrix.data
                nm = parent_ob.name if len(parent_ob.name) and use_names \
                    else parent_ob.guid
                # geometry-less parent: represent it with an empty
                bob = bpy.data.objects.new(nm, None)
                mat = mathutils.Matrix((
                    [m[0], m[1], m[2], 0],
                    [m[3], m[4], m[5], 0],
                    [m[6], m[7], m[8], 0],
                    [m[9], m[10], m[11], 1]))
                if transpose_matrices: mat.transpose()
                id_to_matrix[parent_ob.id] = mat
                bpy.context.scene.objects.link(bob)
                bob.ifc_id = parent_ob.id
                bob.ifc_name, bob.ifc_type, bob.ifc_guid = \
                    parent_ob.name, parent_ob.type, parent_ob.guid
                if parent_ob.parent_id > 0:
                    id_to_parent[parent_id] = parent_ob.parent_id
                    id_to_parent_temp[parent_id] = parent_ob.parent_id
            # cache the lookup result (even None) to avoid re-querying
            if parent_id not in id_to_object: id_to_object[parent_id] = []
            id_to_object[parent_id].append(bob)
        if bob:
            for ob in id_to_object[id]:
                ob.parent = bob
    # Third pass: apply the deferred matrices, converting world transforms
    # into local ones relative to each object's parent.
    id_to_matrix_temp = dict(id_to_matrix)
    while len(id_to_matrix_temp):
        id, matrix = id_to_matrix_temp.popitem()
        parent_id = id_to_parent.get(id, None)
        parent_matrix = id_to_matrix.get(parent_id, None)
        for ob in id_to_object[id]:
            if parent_matrix:
                ob.matrix_local = parent_matrix.inverted() * matrix
            else:
                ob.matrix_world = matrix
    if process_relations:
        print("Done processing relations")
    # Finally, subtract each opening from its parent with a BOOLEAN modifier.
    for opening_id in openings:
        parent_id = id_to_parent[opening_id]
        if parent_id in id_to_object:
            parent_ob = id_to_object[parent_id][0]
            for opening_ob in id_to_object[opening_id]:
                mod = parent_ob.modifiers.new("opening", "BOOLEAN")
                mod.operation = "DIFFERENCE"
                mod.object = opening_ob
    # keep the ifcopenshell log available inside Blender's text editor
    txt = bpy.data.texts.new("%s.log"%bpy.path.basename(filename))
    txt.from_string(iterator.getLog())
    return True
class ImportIFC(bpy.types.Operator, ImportHelper):
    """File > Import operator that loads geometry from an .ifc file."""
    bl_idname = "import_scene.ifc"
    bl_label = "Import .ifc file"

    filename_ext = ".ifc"
    filter_glob = StringProperty(default="*.ifc", options={'HIDDEN'})

    # Options shown in the file browser's operator panel; forwarded to
    # import_ifc() below.
    use_names = BoolProperty(name="Use entity names",
        description="Use entity names rather than GlobalIds for objects",
        default=True)
    process_relations = BoolProperty(name="Process relations",
        description="Convert containment and aggregation" \
            " relations to parenting" \
            " (warning: may be slow on large files)",
        default=False)
    blender_booleans = BoolProperty(name="Use Blender booleans",
        description="Use Blender boolean modifiers for opening" \
            " elements",
        default=False)

    def execute(self, context):
        """Run the import; report an error and cancel if parsing fails."""
        if not import_ifc(self.filepath, self.use_names,
                          self.process_relations, self.blender_booleans):
            self.report({'ERROR'},
                'Unable to parse .ifc file or no geometrical entities found'
            )
            # Return CANCELLED (not FINISHED) so Blender knows the operator
            # failed and does not treat the no-op import as a success.
            return {'CANCELLED'}
        return {'FINISHED'}
def menu_func_import(self, context):
    """Append the IFC import operator to Blender's File > Import menu."""
    self.layout.operator(ImportIFC.bl_idname,
        text="Industry Foundation Classes (.ifc)")
def register():
    """Register all classes in this module and hook the import menu."""
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_func_import)

def unregister():
    """Undo register(): remove classes and the import menu entry."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func_import)

# Allow running the file directly from Blender's text editor.
if __name__ == "__main__":
    register()
|
Changaco/oh-mainline | refs/heads/master | vendor/packages/celery/celery/execute/trace.py | 18 | # -*- coding: utf-8 -*-
"""
celery.execute.trace
~~~~~~~~~~~~~~~~~~~~
This module defines how the task execution is traced:
errors are recorded, handlers are applied and so on.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
import traceback
from .. import states, signals
from ..datastructures import ExceptionInfo
from ..exceptions import RetryTaskError
from ..registry import tasks
class TraceInfo(object):
    """Outcome of one traced call: the final task state, the return value
    (or raised exception), and captured exception info for reporting."""

    def __init__(self, status=states.PENDING, retval=None, exc_info=None):
        self.status = status
        self.retval = retval
        self.exc_info = exc_info
        self.exc_type = None
        self.exc_value = None
        self.tb = None
        self.strtb = None
        if self.exc_info:
            self.exc_type, self.exc_value, self.tb = exc_info
            # pre-render the traceback once so handlers/loggers can reuse it
            self.strtb = "\n".join(traceback.format_exception(*exc_info))

    @classmethod
    def trace(cls, fun, args, kwargs, propagate=False):
        """Trace the execution of a function, returning a TraceInfo whose
        state reflects whether it returned, asked for a retry, or failed.

        :keyword propagate: If true, errors will propagate to the caller.

        """
        try:
            return cls(states.SUCCESS, retval=fun(*args, **kwargs))
        except RetryTaskError, exc:
            # the task requested a retry; not a real failure
            return cls(states.RETRY, retval=exc, exc_info=sys.exc_info())
        except Exception, exc:
            if propagate:
                raise
            return cls(states.FAILURE, retval=exc, exc_info=sys.exc_info())
        except BaseException, exc:
            # SystemExit/KeyboardInterrupt and friends always propagate
            raise
        except: # pragma: no cover
            # For Python2.5 where raising strings are still allowed
            # (but deprecated)
            if propagate:
                raise
            return cls(states.FAILURE, retval=None, exc_info=sys.exc_info())
class TaskTrace(object):
    """Execute a single task invocation with full tracing.

    Sends the ``task_prerun``/``task_postrun`` signals around the run and
    dispatches the outcome to a per-state handler (success, retry,
    failure), which in turn invokes the task's own callbacks
    (``on_success``, ``on_retry``, ``on_failure``, ``after_return``).
    """
    def __init__(self, task_name, task_id, args, kwargs, task=None,
            request=None, propagate=None, **_):
        self.task_id = task_id
        self.task_name = task_name
        self.args = args
        self.kwargs = kwargs
        # Fall back to the global registry when no task instance is given.
        self.task = task or tasks[self.task_name]
        self.request = request or {}
        self.status = states.PENDING
        self.strtb = None
        self.propagate = propagate
        # Maps the outcome state from TraceInfo to the handler applied
        # in _trace().
        self._trace_handlers = {states.FAILURE: self.handle_failure,
                                states.RETRY: self.handle_retry,
                                states.SUCCESS: self.handle_success}
    def __call__(self):
        return self.execute()
    def execute(self):
        # Install the request context on the task for the duration of the
        # run; cleared again unconditionally below.
        self.task.request.update(self.request, args=self.args,
                                 called_directly=False, kwargs=self.kwargs)
        signals.task_prerun.send(sender=self.task, task_id=self.task_id,
                                 task=self.task, args=self.args,
                                 kwargs=self.kwargs)
        retval = self._trace()
        signals.task_postrun.send(sender=self.task, task_id=self.task_id,
                                  task=self.task, args=self.args,
                                  kwargs=self.kwargs, retval=retval)
        self.task.request.clear()
        return retval
    def _trace(self):
        # Run the task body, then apply the handler matching the outcome
        # and the common after_return hook.
        trace = TraceInfo.trace(self.task, self.args, self.kwargs,
                                propagate=self.propagate)
        self.status = trace.status
        self.strtb = trace.strtb
        handler = self._trace_handlers[trace.status]
        r = handler(trace.retval, trace.exc_type, trace.tb, trace.strtb)
        self.handle_after_return(trace.status, trace.retval,
                                 trace.exc_type, trace.tb, trace.strtb,
                                 einfo=trace.exc_info)
        return r
    def handle_after_return(self, status, retval, type_, tb, strtb,
                            einfo=None):
        # Only wrap exc info for error states; SUCCESS passes einfo=None.
        if status in states.EXCEPTION_STATES:
            einfo = ExceptionInfo(einfo)
        self.task.after_return(status, retval, self.task_id,
                               self.args, self.kwargs, einfo)
    def handle_success(self, retval, *args):
        """Handle successful execution."""
        self.task.on_success(retval, self.task_id, self.args, self.kwargs)
        return retval
    def handle_retry(self, exc, type_, tb, strtb):
        """Handle retry exception."""
        # Create a simpler version of the RetryTaskError that stringifies
        # the original exception instead of including the exception instance.
        # This is for reporting the retry in logs, email etc, while
        # guaranteeing pickleability.
        message, orig_exc = exc.args
        expanded_msg = "%s: %s" % (message, str(orig_exc))
        einfo = ExceptionInfo((type_, type_(expanded_msg, None), tb))
        self.task.on_retry(exc, self.task_id, self.args, self.kwargs, einfo)
        return einfo
    def handle_failure(self, exc, type_, tb, strtb):
        """Handle exception."""
        einfo = ExceptionInfo((type_, exc, tb))
        self.task.on_failure(exc, self.task_id, self.args, self.kwargs, einfo)
        signals.task_failure.send(sender=self.task, task_id=self.task_id,
                                  exception=exc, args=self.args,
                                  kwargs=self.kwargs, traceback=tb,
                                  einfo=einfo)
        return einfo
|
TNT-Samuel/Coding-Projects | refs/heads/master | DNS Server/Source/Lib/idlelib/idle_test/test_config.py | 3 | '''Test idlelib.config.
Coverage: 96% (100% for IdleConfParser, IdleUserConfParser*, ConfigChanges).
* Exception is OSError clause in Save method.
Much of IdleConf is also exercised by ConfigDialog and test_configdialog.
'''
import copy
import sys
import os
import tempfile
from test.support import captured_stderr, findfile
import unittest
from unittest import mock
import idlelib
from idlelib import config
from idlelib.idle_test.mock_idle import Func
# Tests should not depend on fortuitous user configurations.
# They must not affect actual user .cfg files.
# Replace user parsers with empty parsers that cannot be saved
# due to getting '' as the filename when created.
# Keep references to the real user config so tearDownModule can restore it.
idleConf = config.idleConf
usercfg = idleConf.userCfg
# In-memory stand-ins, one per config type; the '' filename makes any
# attempted Save a no-op, so user .cfg files are never touched.
testcfg = {}
usermain = testcfg['main'] = config.IdleUserConfParser('')
userhigh = testcfg['highlight'] = config.IdleUserConfParser('')
userkeys = testcfg['keys'] = config.IdleUserConfParser('')
userextn = testcfg['extensions'] = config.IdleUserConfParser('')
def setUpModule():
    # Swap in the throwaway parsers and flag test mode for idlelib.
    idleConf.userCfg = testcfg
    idlelib.testing = True
def tearDownModule():
    # Restore the real user config and leave test mode.
    idleConf.userCfg = usercfg
    idlelib.testing = False
class IdleConfParserTest(unittest.TestCase):
    """Test that IdleConfParser works"""
    # Fixture exercising bool/int/str option handling across two sections.
    config = """
        [one]
        one = false
        two = true
        three = 10
        [two]
        one = a string
        two = true
        three = false
    """
    def test_get(self):
        parser = config.IdleConfParser('')
        parser.read_string(self.config)
        eq = self.assertEqual
        # Test with type argument.
        self.assertIs(parser.Get('one', 'one', type='bool'), False)
        self.assertIs(parser.Get('one', 'two', type='bool'), True)
        eq(parser.Get('one', 'three', type='int'), 10)
        eq(parser.Get('two', 'one'), 'a string')
        self.assertIs(parser.Get('two', 'two', type='bool'), True)
        self.assertIs(parser.Get('two', 'three', type='bool'), False)
        # Test without type should fallback to string.
        eq(parser.Get('two', 'two'), 'true')
        eq(parser.Get('two', 'three'), 'false')
        # If option not exist, should return None, or default.
        self.assertIsNone(parser.Get('not', 'exist'))
        eq(parser.Get('not', 'exist', default='DEFAULT'), 'DEFAULT')
    def test_get_option_list(self):
        parser = config.IdleConfParser('')
        parser.read_string(self.config)
        get_list = parser.GetOptionList
        self.assertCountEqual(get_list('one'), ['one', 'two', 'three'])
        self.assertCountEqual(get_list('two'), ['one', 'two', 'three'])
        # Unknown section yields an empty list, not an exception.
        self.assertEqual(get_list('not exist'), [])
    def test_load_nothing(self):
        # Loading with '' as the file path should leave the parser empty.
        parser = config.IdleConfParser('')
        parser.Load()
        self.assertEqual(parser.sections(), [])
    def test_load_file(self):
        # Borrow test/cfgparser.1 from test_configparser.
        config_path = findfile('cfgparser.1')
        parser = config.IdleConfParser(config_path)
        parser.Load()
        self.assertEqual(parser.Get('Foo Bar', 'foo'), 'newbar')
        self.assertEqual(parser.GetOptionList('Foo Bar'), ['foo'])
class IdleUserConfParserTest(unittest.TestCase):
    """Test that IdleUserConfParser works"""
    def new_parser(self, path=''):
        # Default '' path means the parser cannot be saved to disk.
        return config.IdleUserConfParser(path)
    def test_set_option(self):
        parser = self.new_parser()
        parser.add_section('Foo')
        # Setting new option in existing section should return True.
        self.assertTrue(parser.SetOption('Foo', 'bar', 'true'))
        # Setting existing option with same value should return False.
        self.assertFalse(parser.SetOption('Foo', 'bar', 'true'))
        # Setting existing option with new value should return True.
        self.assertTrue(parser.SetOption('Foo', 'bar', 'false'))
        self.assertEqual(parser.Get('Foo', 'bar'), 'false')
        # Setting option in new section should create section and return True.
        self.assertTrue(parser.SetOption('Bar', 'bar', 'true'))
        self.assertCountEqual(parser.sections(), ['Bar', 'Foo'])
        self.assertEqual(parser.Get('Bar', 'bar'), 'true')
    def test_remove_option(self):
        # RemoveOption returns whether anything was actually removed.
        parser = self.new_parser()
        parser.AddSection('Foo')
        parser.SetOption('Foo', 'bar', 'true')
        self.assertTrue(parser.RemoveOption('Foo', 'bar'))
        self.assertFalse(parser.RemoveOption('Foo', 'bar'))
        self.assertFalse(parser.RemoveOption('Not', 'Exist'))
    def test_add_section(self):
        parser = self.new_parser()
        self.assertEqual(parser.sections(), [])
        # Should not add duplicate section.
        # Configparser raises DuplicateError, IdleParser not.
        parser.AddSection('Foo')
        parser.AddSection('Foo')
        parser.AddSection('Bar')
        self.assertCountEqual(parser.sections(), ['Bar', 'Foo'])
    def test_remove_empty_sections(self):
        # Only sections that still carry options survive the sweep.
        parser = self.new_parser()
        parser.AddSection('Foo')
        parser.AddSection('Bar')
        parser.SetOption('Idle', 'name', 'val')
        self.assertCountEqual(parser.sections(), ['Bar', 'Foo', 'Idle'])
        parser.RemoveEmptySections()
        self.assertEqual(parser.sections(), ['Idle'])
    def test_is_empty(self):
        # IsEmpty also removes empty sections as a side effect.
        parser = self.new_parser()
        parser.AddSection('Foo')
        parser.AddSection('Bar')
        self.assertTrue(parser.IsEmpty())
        self.assertEqual(parser.sections(), [])
        parser.SetOption('Foo', 'bar', 'false')
        parser.AddSection('Bar')
        self.assertFalse(parser.IsEmpty())
        self.assertCountEqual(parser.sections(), ['Foo'])
    def test_remove_file(self):
        with tempfile.TemporaryDirectory() as tdir:
            path = os.path.join(tdir, 'test.cfg')
            parser = self.new_parser(path)
            parser.RemoveFile()  # Should not raise exception.
            parser.AddSection('Foo')
            parser.SetOption('Foo', 'bar', 'true')
            parser.Save()
            self.assertTrue(os.path.exists(path))
            parser.RemoveFile()
            self.assertFalse(os.path.exists(path))
    def test_save(self):
        with tempfile.TemporaryDirectory() as tdir:
            path = os.path.join(tdir, 'test.cfg')
            parser = self.new_parser(path)
            parser.AddSection('Foo')
            parser.SetOption('Foo', 'bar', 'true')
            # Should save to path when config is not empty.
            self.assertFalse(os.path.exists(path))
            parser.Save()
            self.assertTrue(os.path.exists(path))
            # Should remove the file from disk when config is empty.
            parser.remove_section('Foo')
            parser.Save()
            self.assertFalse(os.path.exists(path))
class IdleConfTest(unittest.TestCase):
    """Test for idleConf"""
    @classmethod
    def setUpClass(cls):
        # Cache the text of the config-*.def files shipped with IDLE so
        # mock_config() can build parsers without touching the filesystem,
        # and silence config._warn for the duration of the class.
        cls.config_string = {}
        conf = config.IdleConf(_utest=True)
        if __name__ != '__main__':
            idle_dir = os.path.dirname(__file__)
        else:
            idle_dir = os.path.abspath(sys.path[0])
        for ctype in conf.config_types:
            config_path = os.path.join(idle_dir, '../config-%s.def' % ctype)
            with open(config_path, 'r') as f:
                cls.config_string[ctype] = f.read()
        cls.orig_warn = config._warn
        config._warn = Func()
    @classmethod
    def tearDownClass(cls):
        config._warn = cls.orig_warn
    def new_config(self, _utest=False):
        return config.IdleConf(_utest=_utest)
    def mock_config(self):
        """Return a mocked idleConf

        Both default and user config used the same config-*.def
        """
        conf = config.IdleConf(_utest=True)
        for ctype in conf.config_types:
            conf.defaultCfg[ctype] = config.IdleConfParser('')
            conf.defaultCfg[ctype].read_string(self.config_string[ctype])
            conf.userCfg[ctype] = config.IdleUserConfParser('')
            conf.userCfg[ctype].read_string(self.config_string[ctype])
        return conf
    @unittest.skipIf(sys.platform.startswith('win'), 'this is test for unix system')
    def test_get_user_cfg_dir_unix(self):
        "Test to get user config directory under unix"
        conf = self.new_config(_utest=True)
        # Check normal way should success
        with mock.patch('os.path.expanduser', return_value='/home/foo'):
            with mock.patch('os.path.exists', return_value=True):
                self.assertEqual(conf.GetUserCfgDir(), '/home/foo/.idlerc')
        # Check os.getcwd should success
        with mock.patch('os.path.expanduser', return_value='~'):
            with mock.patch('os.getcwd', return_value='/home/foo/cpython'):
                with mock.patch('os.mkdir'):
                    self.assertEqual(conf.GetUserCfgDir(),
                                     '/home/foo/cpython/.idlerc')
        # Check user dir not exists and created failed should raise SystemExit
        with mock.patch('os.path.join', return_value='/path/not/exists'):
            with self.assertRaises(SystemExit):
                with self.assertRaises(FileNotFoundError):
                    conf.GetUserCfgDir()
    @unittest.skipIf(not sys.platform.startswith('win'), 'this is test for windows system')
    def test_get_user_cfg_dir_windows(self):
        "Test to get user config directory under windows"
        conf = self.new_config(_utest=True)
        # Check normal way should success
        with mock.patch('os.path.expanduser', return_value='C:\\foo'):
            with mock.patch('os.path.exists', return_value=True):
                self.assertEqual(conf.GetUserCfgDir(), 'C:\\foo\\.idlerc')
        # Check os.getcwd should success
        with mock.patch('os.path.expanduser', return_value='~'):
            with mock.patch('os.getcwd', return_value='C:\\foo\\cpython'):
                with mock.patch('os.mkdir'):
                    self.assertEqual(conf.GetUserCfgDir(),
                                     'C:\\foo\\cpython\\.idlerc')
        # Check user dir not exists and created failed should raise SystemExit
        with mock.patch('os.path.join', return_value='/path/not/exists'):
            with self.assertRaises(SystemExit):
                with self.assertRaises(FileNotFoundError):
                    conf.GetUserCfgDir()
    def test_create_config_handlers(self):
        conf = self.new_config(_utest=True)
        # Mock out idle_dir
        idle_dir = '/home/foo'
        with mock.patch.dict({'__name__': '__foo__'}):
            with mock.patch('os.path.dirname', return_value=idle_dir):
                conf.CreateConfigHandlers()
        # Check keys are equal
        self.assertCountEqual(conf.defaultCfg.keys(), conf.config_types)
        self.assertCountEqual(conf.userCfg.keys(), conf.config_types)
        # Check conf parser are correct type
        for default_parser in conf.defaultCfg.values():
            self.assertIsInstance(default_parser, config.IdleConfParser)
        for user_parser in conf.userCfg.values():
            self.assertIsInstance(user_parser, config.IdleUserConfParser)
        # Check config path are correct
        for config_type, parser in conf.defaultCfg.items():
            self.assertEqual(parser.file,
                             os.path.join(idle_dir, 'config-%s.def' % config_type))
        for config_type, parser in conf.userCfg.items():
            self.assertEqual(parser.file,
                             os.path.join(conf.userdir, 'config-%s.cfg' % config_type))
    def test_load_cfg_files(self):
        conf = self.new_config(_utest=True)
        # Borrow test/cfgparser.1 from test_configparser.
        config_path = findfile('cfgparser.1')
        conf.defaultCfg['foo'] = config.IdleConfParser(config_path)
        conf.userCfg['foo'] = config.IdleUserConfParser(config_path)
        # Load all config from path
        conf.LoadCfgFiles()
        eq = self.assertEqual
        # Check defaultCfg is loaded
        eq(conf.defaultCfg['foo'].Get('Foo Bar', 'foo'), 'newbar')
        eq(conf.defaultCfg['foo'].GetOptionList('Foo Bar'), ['foo'])
        # Check userCfg is loaded
        eq(conf.userCfg['foo'].Get('Foo Bar', 'foo'), 'newbar')
        eq(conf.userCfg['foo'].GetOptionList('Foo Bar'), ['foo'])
    def test_save_user_cfg_files(self):
        # SaveUserCfgFiles should call Save once per user config type.
        conf = self.mock_config()
        with mock.patch('idlelib.config.IdleUserConfParser.Save') as m:
            conf.SaveUserCfgFiles()
            self.assertEqual(m.call_count, len(conf.userCfg))
    def test_get_option(self):
        conf = self.mock_config()
        eq = self.assertEqual
        eq(conf.GetOption('main', 'EditorWindow', 'width'), '80')
        eq(conf.GetOption('main', 'EditorWindow', 'width', type='int'), 80)
        # Each bad lookup below emits warnings; count them via the mock.
        with mock.patch('idlelib.config._warn') as _warn:
            eq(conf.GetOption('main', 'EditorWindow', 'font', type='int'), None)
            eq(conf.GetOption('main', 'EditorWindow', 'NotExists'), None)
            eq(conf.GetOption('main', 'EditorWindow', 'NotExists', default='NE'), 'NE')
            eq(_warn.call_count, 4)
    def test_set_option(self):
        conf = self.mock_config()
        conf.SetOption('main', 'Foo', 'bar', 'newbar')
        self.assertEqual(conf.GetOption('main', 'Foo', 'bar'), 'newbar')
    def test_get_section_list(self):
        conf = self.mock_config()
        self.assertCountEqual(
            conf.GetSectionList('default', 'main'),
            ['General', 'EditorWindow', 'Indent', 'Theme',
             'Keys', 'History', 'HelpFiles'])
        self.assertCountEqual(
            conf.GetSectionList('user', 'main'),
            ['General', 'EditorWindow', 'Indent', 'Theme',
             'Keys', 'History', 'HelpFiles'])
        with self.assertRaises(config.InvalidConfigSet):
            conf.GetSectionList('foobar', 'main')
        with self.assertRaises(config.InvalidConfigType):
            conf.GetSectionList('default', 'notexists')
    def test_get_highlight(self):
        conf = self.mock_config()
        eq = self.assertEqual
        eq(conf.GetHighlight('IDLE Classic', 'normal'), {'foreground': '#000000',
                                                         'background': '#ffffff'})
        eq(conf.GetHighlight('IDLE Classic', 'normal', 'fg'), '#000000')
        eq(conf.GetHighlight('IDLE Classic', 'normal', 'bg'), '#ffffff')
        with self.assertRaises(config.InvalidFgBg):
            conf.GetHighlight('IDLE Classic', 'normal', 'fb')
        # Test cursor (this background should be normal-background)
        eq(conf.GetHighlight('IDLE Classic', 'cursor'), {'foreground': 'black',
                                                         'background': '#ffffff'})
        # Test get user themes
        conf.SetOption('highlight', 'Foobar', 'normal-foreground', '#747474')
        conf.SetOption('highlight', 'Foobar', 'normal-background', '#171717')
        with mock.patch('idlelib.config._warn'):
            eq(conf.GetHighlight('Foobar', 'normal'), {'foreground': '#747474',
                                                       'background': '#171717'})
    def test_get_theme_dict(self):
        "XXX: NOT YET DONE"
        conf = self.mock_config()
        # These two should be the same
        self.assertEqual(
            conf.GetThemeDict('default', 'IDLE Classic'),
            conf.GetThemeDict('user', 'IDLE Classic'))
        with self.assertRaises(config.InvalidTheme):
            conf.GetThemeDict('bad', 'IDLE Classic')
    def test_get_current_theme_and_keys(self):
        # CurrentTheme/CurrentKeys are thin wrappers over
        # current_colors_and_keys.
        conf = self.mock_config()
        self.assertEqual(conf.CurrentTheme(), conf.current_colors_and_keys('Theme'))
        self.assertEqual(conf.CurrentKeys(), conf.current_colors_and_keys('Keys'))
    def test_current_colors_and_keys(self):
        conf = self.mock_config()
        self.assertEqual(conf.current_colors_and_keys('Theme'), 'IDLE Classic')
    def test_default_keys(self):
        # default_keys() picks a key set name per platform.
        current_platform = sys.platform
        conf = self.new_config(_utest=True)
        sys.platform = 'win32'
        self.assertEqual(conf.default_keys(), 'IDLE Classic Windows')
        sys.platform = 'darwin'
        self.assertEqual(conf.default_keys(), 'IDLE Classic OSX')
        sys.platform = 'some-linux'
        self.assertEqual(conf.default_keys(), 'IDLE Modern Unix')
        # Restore platform
        sys.platform = current_platform
    def test_get_extensions(self):
        userextn.read_string('''
            [ZzDummy]
            enable = True
            [DISABLE]
            enable = False
            ''')
        eq = self.assertEqual
        iGE = idleConf.GetExtensions
        eq(iGE(shell_only=True), [])
        eq(iGE(), ['ZzDummy'])
        eq(iGE(editor_only=True), ['ZzDummy'])
        eq(iGE(active_only=False), ['ZzDummy', 'DISABLE'])
        eq(iGE(active_only=False, editor_only=True), ['ZzDummy', 'DISABLE'])
        # Clean up the shared module-level parser for the next test.
        userextn.remove_section('ZzDummy')
        userextn.remove_section('DISABLE')
    def test_remove_key_bind_names(self):
        conf = self.mock_config()
        self.assertCountEqual(
            conf.RemoveKeyBindNames(conf.GetSectionList('default', 'extensions')),
            ['AutoComplete', 'CodeContext', 'FormatParagraph', 'ParenMatch','ZzDummy'])
    def test_get_extn_name_for_event(self):
        userextn.read_string('''
            [ZzDummy]
            enable = True
            ''')
        eq = self.assertEqual
        eq(idleConf.GetExtnNameForEvent('z-in'), 'ZzDummy')
        eq(idleConf.GetExtnNameForEvent('z-out'), None)
        userextn.remove_section('ZzDummy')
    def test_get_extension_keys(self):
        userextn.read_string('''
            [ZzDummy]
            enable = True
            ''')
        self.assertEqual(idleConf.GetExtensionKeys('ZzDummy'),
                         {'<<z-in>>': ['<Control-Shift-KeyRelease-Insert>']})
        userextn.remove_section('ZzDummy')
# need option key test
##        key = ['<Option-Key-2>'] if sys.platform == 'darwin' else ['<Alt-Key-2>']
##        eq(conf.GetExtensionKeys('ZoomHeight'), {'<<zoom-height>>': key})
    def test_get_extension_bindings(self):
        userextn.read_string('''
            [ZzDummy]
            enable = True
            ''')
        eq = self.assertEqual
        iGEB = idleConf.GetExtensionBindings
        eq(iGEB('NotExists'), {})
        expect = {'<<z-in>>': ['<Control-Shift-KeyRelease-Insert>'],
                  '<<z-out>>': ['<Control-Shift-KeyRelease-Delete>']}
        eq(iGEB('ZzDummy'), expect)
        userextn.remove_section('ZzDummy')
    def test_get_keybinding(self):
        conf = self.mock_config()
        eq = self.assertEqual
        eq(conf.GetKeyBinding('IDLE Modern Unix', '<<copy>>'),
            ['<Control-Shift-Key-C>', '<Control-Key-Insert>'])
        eq(conf.GetKeyBinding('IDLE Classic Unix', '<<copy>>'),
            ['<Alt-Key-w>', '<Meta-Key-w>'])
        eq(conf.GetKeyBinding('IDLE Classic Windows', '<<copy>>'),
            ['<Control-Key-c>', '<Control-Key-C>'])
        eq(conf.GetKeyBinding('IDLE Classic Mac', '<<copy>>'), ['<Command-Key-c>'])
        eq(conf.GetKeyBinding('IDLE Classic OSX', '<<copy>>'), ['<Command-Key-c>'])
        # Test keybinding not exists
        eq(conf.GetKeyBinding('NOT EXISTS', '<<copy>>'), [])
        eq(conf.GetKeyBinding('IDLE Modern Unix', 'NOT EXISTS'), [])
    def test_get_current_keyset(self):
        current_platform = sys.platform
        conf = self.mock_config()
        # Ensure that platform isn't darwin
        sys.platform = 'some-linux'
        self.assertEqual(conf.GetCurrentKeySet(), conf.GetKeySet(conf.CurrentKeys()))
        # This should not be the same, since replace <Alt- to <Option-.
        # Above depended on config-extensions.def having Alt keys,
        # which is no longer true.
        # sys.platform = 'darwin'
        # self.assertNotEqual(conf.GetCurrentKeySet(), conf.GetKeySet(conf.CurrentKeys()))
        # Restore platform
        sys.platform = current_platform
    def test_get_keyset(self):
        conf = self.mock_config()
        # Conflict with key set, should be disabled to ''.
        conf.defaultCfg['extensions'].add_section('Foobar')
        conf.defaultCfg['extensions'].add_section('Foobar_cfgBindings')
        conf.defaultCfg['extensions'].set('Foobar', 'enable', 'True')
        conf.defaultCfg['extensions'].set('Foobar_cfgBindings', 'newfoo', '<Key-F3>')
        self.assertEqual(conf.GetKeySet('IDLE Modern Unix')['<<newfoo>>'], '')
    def test_is_core_binding(self):
        # XXX: Should move out the core keys to config file or other place
        conf = self.mock_config()
        self.assertTrue(conf.IsCoreBinding('copy'))
        self.assertTrue(conf.IsCoreBinding('cut'))
        self.assertTrue(conf.IsCoreBinding('del-word-right'))
        self.assertFalse(conf.IsCoreBinding('not-exists'))
    def test_extra_help_source_list(self):
        # Test GetExtraHelpSourceList and GetAllExtraHelpSourcesList in same
        # place to prevent prepare input data twice.
        conf = self.mock_config()
        # Test default with no extra help source
        self.assertEqual(conf.GetExtraHelpSourceList('default'), [])
        self.assertEqual(conf.GetExtraHelpSourceList('user'), [])
        with self.assertRaises(config.InvalidConfigSet):
            self.assertEqual(conf.GetExtraHelpSourceList('bad'), [])
        self.assertCountEqual(
            conf.GetAllExtraHelpSourcesList(),
            conf.GetExtraHelpSourceList('default') + conf.GetExtraHelpSourceList('user'))
        # Add help source to user config
        conf.userCfg['main'].SetOption('HelpFiles', '4', 'Python;https://python.org')  # This is bad input
        conf.userCfg['main'].SetOption('HelpFiles', '3', 'Python:https://python.org')  # This is bad input
        conf.userCfg['main'].SetOption('HelpFiles', '2', 'Pillow;https://pillow.readthedocs.io/en/latest/')
        conf.userCfg['main'].SetOption('HelpFiles', '1', 'IDLE;C:/Programs/Python36/Lib/idlelib/help.html')
        # Entries come back sorted by number; the ':'-separated bad entry
        # ('3') is dropped, the ';'-separated one ('4') survives.
        self.assertEqual(conf.GetExtraHelpSourceList('user'),
                         [('IDLE', 'C:/Programs/Python36/Lib/idlelib/help.html', '1'),
                          ('Pillow', 'https://pillow.readthedocs.io/en/latest/', '2'),
                          ('Python', 'https://python.org', '4')])
        self.assertCountEqual(
            conf.GetAllExtraHelpSourcesList(),
            conf.GetExtraHelpSourceList('default') + conf.GetExtraHelpSourceList('user'))
    def test_get_font(self):
        from test.support import requires
        from tkinter import Tk
        from tkinter.font import Font
        conf = self.mock_config()
        requires('gui')
        root = Tk()
        root.withdraw()
        f = Font.actual(Font(name='TkFixedFont', exists=True, root=root))
        self.assertEqual(
            conf.GetFont(root, 'main', 'EditorWindow'),
            (f['family'], 10 if f['size'] <= 0 else f['size'], f['weight']))
        # Cleanup root
        root.destroy()
        del root
    def test_get_core_keys(self):
        conf = self.mock_config()
        eq = self.assertEqual
        eq(conf.GetCoreKeys()['<<center-insert>>'], ['<Control-l>'])
        eq(conf.GetCoreKeys()['<<copy>>'], ['<Control-c>', '<Control-C>'])
        eq(conf.GetCoreKeys()['<<history-next>>'], ['<Alt-n>'])
        eq(conf.GetCoreKeys('IDLE Classic Windows')['<<center-insert>>'],
           ['<Control-Key-l>', '<Control-Key-L>'])
        eq(conf.GetCoreKeys('IDLE Classic OSX')['<<copy>>'], ['<Command-Key-c>'])
        eq(conf.GetCoreKeys('IDLE Classic Unix')['<<history-next>>'],
           ['<Alt-Key-n>', '<Meta-Key-n>'])
        eq(conf.GetCoreKeys('IDLE Modern Unix')['<<history-next>>'],
            ['<Alt-Key-n>', '<Meta-Key-n>'])
class CurrentColorKeysTest(unittest.TestCase):
    """ Test colorkeys function with user config [Theme] and [Keys] patterns.

        colorkeys = config.IdleConf.current_colors_and_keys
        Test all patterns written by IDLE and some errors
        Item 'default' should really be 'builtin' (versus 'custom').
    """
    colorkeys = idleConf.current_colors_and_keys
    default_theme = 'IDLE Classic'
    default_keys = idleConf.default_keys()
    def test_old_builtin_theme(self):
        # On initial installation, user main is blank.
        self.assertEqual(self.colorkeys('Theme'), self.default_theme)
        # For old default, name2 must be blank.
        usermain.read_string('''
            [Theme]
            default = True
            ''')
        # IDLE omits 'name' for default old builtin theme.
        self.assertEqual(self.colorkeys('Theme'), self.default_theme)
        # IDLE adds 'name' for non-default old builtin theme.
        usermain['Theme']['name'] = 'IDLE New'
        self.assertEqual(self.colorkeys('Theme'), 'IDLE New')
        # Erroneous non-default old builtin reverts to default.
        usermain['Theme']['name'] = 'non-existent'
        self.assertEqual(self.colorkeys('Theme'), self.default_theme)
        usermain.remove_section('Theme')
    def test_new_builtin_theme(self):
        # IDLE writes name2 for new builtins.
        usermain.read_string('''
            [Theme]
            default = True
            name2 = IDLE Dark
            ''')
        self.assertEqual(self.colorkeys('Theme'), 'IDLE Dark')
        # Leftover 'name', not removed, is ignored.
        usermain['Theme']['name'] = 'IDLE New'
        self.assertEqual(self.colorkeys('Theme'), 'IDLE Dark')
        # Erroneous non-default new builtin reverts to default.
        usermain['Theme']['name2'] = 'non-existent'
        self.assertEqual(self.colorkeys('Theme'), self.default_theme)
        usermain.remove_section('Theme')
    def test_user_override_theme(self):
        # Erroneous custom name (no definition) reverts to default.
        usermain.read_string('''
            [Theme]
            default = False
            name = Custom Dark
            ''')
        self.assertEqual(self.colorkeys('Theme'), self.default_theme)
        # Custom name is valid with matching Section name.
        userhigh.read_string('[Custom Dark]\na=b')
        self.assertEqual(self.colorkeys('Theme'), 'Custom Dark')
        # Name2 is ignored.
        usermain['Theme']['name2'] = 'non-existent'
        self.assertEqual(self.colorkeys('Theme'), 'Custom Dark')
        usermain.remove_section('Theme')
        userhigh.remove_section('Custom Dark')
    def test_old_builtin_keys(self):
        # On initial installation, user main is blank.
        self.assertEqual(self.colorkeys('Keys'), self.default_keys)
        # For old default, name2 must be blank, name is always used.
        usermain.read_string('''
            [Keys]
            default = True
            name = IDLE Classic Unix
            ''')
        self.assertEqual(self.colorkeys('Keys'), 'IDLE Classic Unix')
        # Erroneous non-default old builtin reverts to default.
        usermain['Keys']['name'] = 'non-existent'
        self.assertEqual(self.colorkeys('Keys'), self.default_keys)
        usermain.remove_section('Keys')
    def test_new_builtin_keys(self):
        # IDLE writes name2 for new builtins.
        usermain.read_string('''
            [Keys]
            default = True
            name2 = IDLE Modern Unix
            ''')
        self.assertEqual(self.colorkeys('Keys'), 'IDLE Modern Unix')
        # Leftover 'name', not removed, is ignored.
        usermain['Keys']['name'] = 'IDLE Classic Unix'
        self.assertEqual(self.colorkeys('Keys'), 'IDLE Modern Unix')
        # Erroneous non-default new builtin reverts to default.
        usermain['Keys']['name2'] = 'non-existent'
        self.assertEqual(self.colorkeys('Keys'), self.default_keys)
        usermain.remove_section('Keys')
    def test_user_override_keys(self):
        # Erroneous custom name (no definition) reverts to default.
        usermain.read_string('''
            [Keys]
            default = False
            name = Custom Keys
            ''')
        self.assertEqual(self.colorkeys('Keys'), self.default_keys)
        # Custom name is valid with matching Section name.
        userkeys.read_string('[Custom Keys]\na=b')
        self.assertEqual(self.colorkeys('Keys'), 'Custom Keys')
        # Name2 is ignored.
        usermain['Keys']['name2'] = 'non-existent'
        self.assertEqual(self.colorkeys('Keys'), 'Custom Keys')
        usermain.remove_section('Keys')
        userkeys.remove_section('Custom Keys')
class ChangesTest(unittest.TestCase):
    # A ConfigChanges with no pending changes: one empty dict per type.
    empty = {'main':{}, 'highlight':{}, 'keys':{}, 'extensions':{}}
    def load(self):  # Test_add_option verifies that this works.
        changes = self.changes
        changes.add_option('main', 'Msec', 'mitem', 'mval')
        changes.add_option('highlight', 'Hsec', 'hitem', 'hval')
        changes.add_option('keys', 'Ksec', 'kitem', 'kval')
        return changes
    # Expected contents of self.changes after load().
    loaded = {'main': {'Msec': {'mitem': 'mval'}},
              'highlight': {'Hsec': {'hitem': 'hval'}},
              'keys': {'Ksec': {'kitem':'kval'}},
              'extensions': {}}
    def setUp(self):
        self.changes = config.ConfigChanges()
    def test_init(self):
        self.assertEqual(self.changes, self.empty)
    def test_add_option(self):
        changes = self.load()
        self.assertEqual(changes, self.loaded)
        # Re-adding an identical option is a no-op.
        changes.add_option('main', 'Msec', 'mitem', 'mval')
        self.assertEqual(changes, self.loaded)
    def test_save_option(self):  # Static function does not touch changes.
        save_option = self.changes.save_option
        self.assertTrue(save_option('main', 'Indent', 'what', '0'))
        self.assertFalse(save_option('main', 'Indent', 'what', '0'))
        self.assertEqual(usermain['Indent']['what'], '0')
        self.assertTrue(save_option('main', 'Indent', 'use-spaces', '0'))
        self.assertEqual(usermain['Indent']['use-spaces'], '0')
        # Saving the default value removes the user option entirely.
        self.assertTrue(save_option('main', 'Indent', 'use-spaces', '1'))
        self.assertFalse(usermain.has_option('Indent', 'use-spaces'))
        usermain.remove_section('Indent')
    def test_save_added(self):
        changes = self.load()
        self.assertTrue(changes.save_all())
        self.assertEqual(usermain['Msec']['mitem'], 'mval')
        self.assertEqual(userhigh['Hsec']['hitem'], 'hval')
        self.assertEqual(userkeys['Ksec']['kitem'], 'kval')
        # Re-saving an already-saved value reports nothing changed.
        changes.add_option('main', 'Msec', 'mitem', 'mval')
        self.assertFalse(changes.save_all())
        usermain.remove_section('Msec')
        userhigh.remove_section('Hsec')
        userkeys.remove_section('Ksec')
    def test_save_help(self):
        # Any change to HelpFiles overwrites entire section.
        changes = self.changes
        changes.save_option('main', 'HelpFiles', 'IDLE', 'idledoc')
        changes.add_option('main', 'HelpFiles', 'ELDI', 'codeldi')
        changes.save_all()
        self.assertFalse(usermain.has_option('HelpFiles', 'IDLE'))
        self.assertTrue(usermain.has_option('HelpFiles', 'ELDI'))
    def test_save_default(self):  # Cover 2nd and 3rd false branches.
        changes = self.changes
        changes.add_option('main', 'Indent', 'use-spaces', '1')
        # save_option returns False; cfg_type_changed remains False.
    # TODO: test that save_all calls usercfg Saves.
    def test_delete_section(self):
        changes = self.load()
        changes.delete_section('main', 'fake')  # Test no exception.
        self.assertEqual(changes, self.loaded)  # Test nothing deleted.
        for cfgtype, section in (('main', 'Msec'), ('keys', 'Ksec')):
            testcfg[cfgtype].SetOption(section, 'name', 'value')
            changes.delete_section(cfgtype, section)
            with self.assertRaises(KeyError):
                changes[cfgtype][section]  # Test section gone from changes
                testcfg[cfgtype][section]  # and from mock userCfg.
        # TODO test for save call.
    def test_clear(self):
        changes = self.load()
        changes.clear()
        self.assertEqual(changes, self.empty)
class WarningTest(unittest.TestCase):
    """Exercise config._warn's print-once-per-(message, key) behavior."""
    def test_warn(self):
        # Start from a clean slate of remembered warnings.
        config._warned = set()
        # A first occurrence is printed to stderr and remembered.
        with captured_stderr() as stderr:
            config._warn('warning', 'key')
        self.assertEqual(config._warned, {('warning', 'key')})
        self.assertEqual(stderr.getvalue(), 'warning\n')
        # Repeating the identical (message, key) pair is silent.
        with captured_stderr() as stderr:
            config._warn('warning', 'key')
        self.assertEqual(stderr.getvalue(), '')
        # A new pair is printed and accumulated alongside the old one.
        with captured_stderr() as stderr:
            config._warn('warn2', 'yek')
        self.assertEqual(config._warned,
                         {('warning', 'key'), ('warn2', 'yek')})
        self.assertEqual(stderr.getvalue(), 'warn2\n')
# Verbosity 2 prints each test name as it runs.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
dingocuster/scikit-learn | refs/heads/master | examples/cluster/plot_color_quantization.py | 297 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
# Size of the color palette (number of k-means clusters).
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (which must
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array (one row per pixel).
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
# Fit k-means on a random 1000-pixel subsample; fitting on every pixel
# would be far slower with no visible quality gain.
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
# Random baseline codebook: n_colors + 1 pixels picked at random.
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
# For each pixel, the index of its nearest random codebook entry
# (axis=0 pairs each codebook row against all pixels).
labels_random = pairwise_distances_argmin(codebook_random,
                                          image_array,
                                          axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
    """Recreate the (compressed) image from the code book & labels.

    Parameters
    ----------
    codebook : ndarray of shape (n_colors, d)
        The color palette: one row per palette entry.
    labels : ndarray of shape (w * h,)
        For each pixel, in row-major order, the index of its palette entry.
    w, h : int
        Width and height (first two axes) of the reconstructed image.

    Returns
    -------
    ndarray of shape (w, h, d)
        The reconstructed image.
    """
    codebook = np.asarray(codebook)
    labels = np.asarray(labels)
    # Fancy indexing maps every label to its palette color in a single
    # vectorized step; reshape restores the 2-D pixel grid. This replaces
    # the original O(w * h) Python double loop with one numpy operation.
    return codebook[labels].reshape(w, h, -1)
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
# Figure 2: image rebuilt from the 64-color K-Means palette.
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
# Figure 3: image rebuilt from the random palette, for comparison.
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
|
ygenc/onlineLDA | refs/heads/master | onlineldavb_new/build/scipy/scipy/ndimage/interpolation.py | 4 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import _ni_support
import _nd_image
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
def _extend_mode_to_code(mode):
    """Translate a boundary-mode string into its internal integer code."""
    return _ni_support._extend_mode_to_code(mode)
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64):
    """
    Calculates a one-dimensional spline filter along the given axis.
    The lines of the array along the given axis are filtered by a
    spline filter. The order of the spline must be >= 2 and <= 5.
    Parameters
    ----------
    input : array_like
        The input array.
    order : int, optional
        The order of the spline, default is 3.
    axis : int, optional
        The axis along which the spline filter is applied. Default is the last
        axis.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array. Default is `numpy.float64`.
    Returns
    -------
    return_value : ndarray or None
        The filtered input. If `output` is given as a parameter, None is
        returned.
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output, return_value = _ni_support._get_output(output, input)
    if order in [0, 1]:
        # For orders 0 and 1 the spline coefficients equal the samples
        # themselves, so filtering reduces to a plain copy.
        output[...] = numpy.array(input)
    else:
        axis = _ni_support._check_axis(axis, input.ndim)
        # The C extension computes the coefficients and fills `output`
        # in place.
        _nd_image.spline_filter1d(input, order, axis, output)
    return return_value
def spline_filter(input, order=3, output = numpy.float64):
    """
    Multi-dimensional spline filter.
    For more details, see `spline_filter1d`.
    See Also
    --------
    spline_filter1d
    Notes
    -----
    The multi-dimensional filter is implemented as a sequence of
    one-dimensional spline filters. The intermediate arrays are stored
    in the same data type as the output. Therefore, for output types
    with a limited precision, the results may be imprecise because
    intermediate results may be stored with insufficient precision.
    """
    if order < 2 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output, return_value = _ni_support._get_output(output, input)
    if order not in [0, 1] and input.ndim > 0:
        # Filter one axis at a time; after the first pass the (partially
        # filtered) output array becomes the input of the next pass.
        for axis in range(input.ndim):
            spline_filter1d(input, order, axis, output = output)
            input = output
    else:
        # Orders 0/1 (or a 0-d array) need no filtering: plain copy.
        output[...] = input[...]
    return return_value
def geometric_transform(input, mapping, output_shape=None,
                        output=None, order=3,
                        mode='constant', cval=0.0, prefilter=True,
                        extra_arguments=(), extra_keywords={}):
    """
    Apply an arbritrary geometric transform.
    The given mapping function is used to find, for each point in the
    output, the corresponding coordinates in the input. The value of the
    input at those coordinates is determined by spline interpolation of
    the requested order.
    Parameters
    ----------
    input : array_like
        The input array.
    mapping : callable
        A callable object that accepts a tuple of length equal to the output
        array rank, and returns the corresponding input coordinates as a tuple
        of length equal to the input array rank.
    output_shape : tuple of ints
        Shape tuple.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array.
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    mode : str, optional
        Points outside the boundaries of the input are filled according
        to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
        Default is 'constant'.
    cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0
    prefilter : bool, optional
        The parameter prefilter determines if the input is pre-filtered with
        `spline_filter` before interpolation (necessary for spline
        interpolation of order > 1). If False, it is assumed that the input is
        already filtered. Default is True.
    extra_arguments : tuple, optional
        Extra arguments passed to `mapping`.
    extra_keywords : dict, optional
        Extra keywords passed to `mapping`.
    Returns
    -------
    return_value : ndarray or None
        The filtered input. If `output` is given as a parameter, None is
        returned.
    See Also
    --------
    map_coordinates, affine_transform, spline_filter1d
    Examples
    --------
    >>> a = np.arange(12.).reshape((4, 3))
    >>> def shift_func(output_coords):
    ...     return (output_coords[0] - 0.5, output_coords[1] - 0.5)
    ...
    >>> sp.ndimage.geometric_transform(a, shift_func)
    array([[ 0.   ,  0.   ,  0.   ],
           [ 0.   ,  1.362,  2.738],
           [ 0.   ,  4.812,  6.187],
           [ 0.   ,  8.263,  9.637]])
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    if output_shape is None:
        # By default the transform is computed on a grid of the same
        # shape as the input.
        output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    mode = _extend_mode_to_code(mode)
    if prefilter and order > 1:
        # Spline interpolation of order > 1 operates on spline
        # coefficients, which must first be computed from the samples.
        filtered = spline_filter(input, order, output = numpy.float64)
    else:
        filtered = input
    output, return_value = _ni_support._get_output(output, input,
                                                   shape=output_shape)
    # The C routine calls `mapping` for every output point and writes the
    # interpolated values into `output` in place.
    _nd_image.geometric_transform(filtered, mapping, None, None, None,
                output, order, mode, cval, extra_arguments, extra_keywords)
    return return_value
def map_coordinates(input, coordinates, output=None, order=3,
                    mode='constant', cval=0.0, prefilter=True):
    """
    Map the input array to new coordinates by interpolation.
    The array of coordinates is used to find, for each point in the output,
    the corresponding coordinates in the input. The value of the input at
    those coordinates is determined by spline interpolation of the
    requested order.
    The shape of the output is derived from that of the coordinate
    array by dropping the first axis. The values of the array along
    the first axis are the coordinates in the input array at which the
    output value is found.
    Parameters
    ----------
    input : ndarray
        The input array.
    coordinates : array_like
        The coordinates at which `input` is evaluated.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array.
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    mode : str, optional
        Points outside the boundaries of the input are filled according
        to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
        Default is 'constant'.
    cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0
    prefilter : bool, optional
        The parameter prefilter determines if the input is pre-filtered with
        `spline_filter` before interpolation (necessary for spline
        interpolation of order > 1). If False, it is assumed that the input is
        already filtered. Default is True.
    Returns
    -------
    return_value : ndarray
        The result of transforming the input. The shape of the output is
        derived from that of `coordinates` by dropping the first axis.
    See Also
    --------
    spline_filter, geometric_transform, scipy.interpolate
    Examples
    --------
    >>> from scipy import ndimage
    >>> a = np.arange(12.).reshape((4, 3))
    >>> a
    array([[  0.,   1.,   2.],
           [  3.,   4.,   5.],
           [  6.,   7.,   8.],
           [  9.,  10.,  11.]])
    >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    [ 2.  7.]
    Above, the interpolated value of a[0.5, 0.5] gives output[0], while
    a[2, 1] is output[1].
    >>> inds = np.array([[0.5, 2], [0.5, 4]])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
    array([  2. , -33.3])
    >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
    array([ 2.,  8.])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    coordinates = numpy.asarray(coordinates)
    if numpy.iscomplexobj(coordinates):
        raise TypeError('Complex type not supported')
    # The first axis of `coordinates` indexes the input dimensions; the
    # remaining axes give the shape of the output.
    output_shape = coordinates.shape[1:]
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    if coordinates.shape[0] != input.ndim:
        raise RuntimeError('invalid shape for coordinate array')
    mode = _extend_mode_to_code(mode)
    if prefilter and order > 1:
        # Orders > 1 interpolate spline coefficients, which must first be
        # computed from the samples by a prefilter pass.
        filtered = spline_filter(input, order, output = numpy.float64)
    else:
        filtered = input
    output, return_value = _ni_support._get_output(output, input,
                                                   shape=output_shape)
    # The C routine evaluates the spline at the given coordinates and
    # writes into `output` in place.
    _nd_image.geometric_transform(filtered, None, coordinates, None, None,
               output, order, mode, cval, None, None)
    return return_value
def affine_transform(input, matrix, offset=0.0, output_shape=None,
                     output=None, order=3,
                     mode='constant', cval=0.0, prefilter=True):
    """
    Apply an affine transformation.
    The given matrix and offset are used to find for each point in the
    output the corresponding coordinates in the input by an affine
    transformation. The value of the input at those coordinates is
    determined by spline interpolation of the requested order. Points
    outside the boundaries of the input are filled according to the given
    mode.
    Parameters
    ----------
    input : ndarray
        The input array.
    matrix : ndarray
        The matrix must be two-dimensional or can also be given as a
        one-dimensional sequence or array. In the latter case, it is assumed
        that the matrix is diagonal. A more efficient algorithms is then
        applied that exploits the separability of the problem.
    offset : float or sequence, optional
        The offset into the array where the transform is applied. If a float,
        `offset` is the same for each axis. If a sequence, `offset` should
        contain one value for each axis.
    output_shape : tuple of ints, optional
        Shape tuple.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array.
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    mode : str, optional
        Points outside the boundaries of the input are filled according
        to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
        Default is 'constant'.
    cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0
    prefilter : bool, optional
        The parameter prefilter determines if the input is pre-filtered with
        `spline_filter` before interpolation (necessary for spline
        interpolation of order > 1). If False, it is assumed that the input is
        already filtered. Default is True.
    Returns
    -------
    return_value : ndarray or None
        The transformed input. If `output` is given as a parameter, None is
        returned.
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    if output_shape is None:
        output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    mode = _extend_mode_to_code(mode)
    if prefilter and order > 1:
        # Orders > 1 interpolate spline coefficients; compute them first.
        filtered = spline_filter(input, order, output = numpy.float64)
    else:
        filtered = input
    output, return_value = _ni_support._get_output(output, input,
                                                   shape=output_shape)
    # The matrix maps output coordinates to input coordinates, so it must
    # have one row per input axis and one column per output axis.
    matrix = numpy.asarray(matrix, dtype = numpy.float64)
    if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
        raise RuntimeError('no proper affine matrix provided')
    if matrix.shape[0] != input.ndim:
        raise RuntimeError('affine matrix has wrong number of rows')
    if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
        raise RuntimeError('affine matrix has wrong number of columns')
    if not matrix.flags.contiguous:
        # The C extension requires contiguous arrays.
        matrix = matrix.copy()
    offset = _ni_support._normalize_sequence(offset, input.ndim)
    offset = numpy.asarray(offset, dtype = numpy.float64)
    if offset.ndim != 1 or offset.shape[0] < 1:
        raise RuntimeError('no proper offset provided')
    if not offset.flags.contiguous:
        offset = offset.copy()
    if matrix.ndim == 1:
        # Diagonal matrix given as a 1-D array: use the separable (and
        # faster) zoom/shift implementation.
        _nd_image.zoom_shift(filtered, matrix, offset, output, order,
                             mode, cval)
    else:
        _nd_image.geometric_transform(filtered, None, None, matrix, offset,
                            output, order, mode, cval, None, None)
    return return_value
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
          prefilter=True):
    """
    Shift an array.
    The array is shifted using spline interpolation of the requested order.
    Points outside the boundaries of the input are filled according to the
    given mode.
    Parameters
    ----------
    input : ndarray
        The input array.
    shift : float or sequence, optional
        The shift along the axes. If a float, `shift` is the same for each
        axis. If a sequence, `shift` should contain one value for each axis.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array.
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    mode : str, optional
        Points outside the boundaries of the input are filled according
        to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
        Default is 'constant'.
    cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0
    prefilter : bool, optional
        The parameter prefilter determines if the input is pre-filtered with
        `spline_filter` before interpolation (necessary for spline
        interpolation of order > 1). If False, it is assumed that the input is
        already filtered. Default is True.
    Returns
    -------
    return_value : ndarray or None
        The shifted input. If `output` is given as a parameter, None is
        returned.
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    if input.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    mode = _extend_mode_to_code(mode)
    if prefilter and order > 1:
        # Orders > 1 interpolate spline coefficients; compute them first.
        filtered = spline_filter(input, order, output = numpy.float64)
    else:
        filtered = input
    output, return_value = _ni_support._get_output(output, input)
    shift = _ni_support._normalize_sequence(shift, input.ndim)
    # The C routine maps output coordinates back to input coordinates, so
    # the requested forward shift must be negated here.
    shift = [-ii for ii in shift]
    shift = numpy.asarray(shift, dtype = numpy.float64)
    if not shift.flags.contiguous:
        shift = shift.copy()
    _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
    return return_value
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
         prefilter=True):
    """
    Zoom an array.
    The array is zoomed using spline interpolation of the requested order.
    Parameters
    ----------
    input : ndarray
        The input array.
    zoom : float or sequence, optional
        The zoom factor along the axes. If a float, `zoom` is the same for each
        axis. If a sequence, `zoom` should contain one value for each axis.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array.
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    mode : str, optional
        Points outside the boundaries of the input are filled according
        to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
        Default is 'constant'.
    cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0
    prefilter : bool, optional
        The parameter prefilter determines if the input is pre-filtered with
        `spline_filter` before interpolation (necessary for spline
        interpolation of order > 1). If False, it is assumed that the input is
        already filtered. Default is True.
    Returns
    -------
    return_value : ndarray or None
        The zoomed input. If `output` is given as a parameter, None is
        returned.
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    if input.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    mode = _extend_mode_to_code(mode)
    if prefilter and order > 1:
        # Orders > 1 interpolate spline coefficients; compute them first.
        filtered = spline_filter(input, order, output = numpy.float64)
    else:
        filtered = input
    zoom = _ni_support._normalize_sequence(zoom, input.ndim)
    # Output size per axis is the truncated product of size and factor.
    output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
    # Recompute the effective per-axis zoom from the realized output shape
    # as (n_in - 1) / (n_out - 1), so first and last samples of each axis
    # map exactly onto the first and last samples of the input.
    zoom_div = numpy.array(output_shape, float) - 1
    zoom = (numpy.array(input.shape) - 1) / zoom_div
    # Zooming to infinity is unpredictable, so just choose
    # zoom factor 1 instead (handles axes of output length 1, where
    # zoom_div is 0 and the division above yields inf).
    zoom[numpy.isinf(zoom)] = 1
    output, return_value = _ni_support._get_output(output, input,
                                                   shape=output_shape)
    zoom = numpy.asarray(zoom, dtype = numpy.float64)
    zoom = numpy.ascontiguousarray(zoom)
    _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
    return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes=(1, 0), reshape=True,
           output=None, order=3,
           mode='constant', cval=0.0, prefilter=True):
    """
    Rotate an array.
    The array is rotated in the plane defined by the two axes given by the
    `axes` parameter using spline interpolation of the requested order.
    Parameters
    ----------
    input : ndarray
        The input array.
    angle : float
        The rotation angle in degrees.
    axes : tuple of 2 ints, optional
        The two axes that define the plane of rotation. Default is the first
        two axes.
    reshape : bool, optional
        If `reshape` is true, the output shape is adapted so that the input
        array is contained completely in the output. Default is True.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the returned
        array.
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    mode : str, optional
        Points outside the boundaries of the input are filled according
        to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
        Default is 'constant'.
    cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0
    prefilter : bool, optional
        The parameter prefilter determines if the input is pre-filtered with
        `spline_filter` before interpolation (necessary for spline
        interpolation of order > 1). If False, it is assumed that the input is
        already filtered. Default is True.
    Returns
    -------
    return_value : ndarray or None
        The rotated input. If `output` is given as a parameter, None is
        returned.
    """
    input = numpy.asarray(input)
    axes = list(axes)
    rank = input.ndim
    # Support negative axis indices, counted from the last dimension.
    if axes[0] < 0:
        axes[0] += rank
    if axes[1] < 0:
        axes[1] += rank
    # Valid axis indices run from 0 to rank - 1.  The previous check used
    # ``> rank``, which let an axis equal to ``rank`` slip through, only to
    # fail below with an obscure IndexError instead of this clear error.
    if axes[0] < 0 or axes[1] < 0 or axes[0] >= rank or axes[1] >= rank:
        raise RuntimeError('invalid rotation plane specified')
    if axes[0] > axes[1]:
        axes = axes[1], axes[0]
    angle = numpy.pi / 180 * angle
    m11 = math.cos(angle)
    m12 = math.sin(angle)
    m21 = -math.sin(angle)
    m22 = math.cos(angle)
    # Rotation matrix mapping output coordinates back to input coordinates.
    matrix = numpy.array([[m11, m12],
                          [m21, m22]], dtype = numpy.float64)
    iy = input.shape[axes[0]]
    ix = input.shape[axes[1]]
    if reshape:
        # Rotate the input-plane corners with the transposed matrix and
        # take their bounding box as the output plane size.
        mtrx = numpy.array([[ m11, -m21],
                            [-m12,  m22]], dtype = numpy.float64)
        minc = [0, 0]
        maxc = [0, 0]
        coor = numpy.dot(mtrx, [0, ix])
        minc, maxc = _minmax(coor, minc, maxc)
        coor = numpy.dot(mtrx, [iy, 0])
        minc, maxc = _minmax(coor, minc, maxc)
        coor = numpy.dot(mtrx, [iy, ix])
        minc, maxc = _minmax(coor, minc, maxc)
        oy = int(maxc[0] - minc[0] + 0.5)
        ox = int(maxc[1] - minc[1] + 0.5)
    else:
        oy = input.shape[axes[0]]
        ox = input.shape[axes[1]]
    # Compute the offset so that the rotation is performed about the
    # center of the plane (output center maps onto input center).
    offset = numpy.zeros((2,), dtype = numpy.float64)
    offset[0] = float(oy) / 2.0 - 0.5
    offset[1] = float(ox) / 2.0 - 0.5
    offset = numpy.dot(matrix, offset)
    tmp = numpy.zeros((2,), dtype = numpy.float64)
    tmp[0] = float(iy) / 2.0 - 0.5
    tmp[1] = float(ix) / 2.0 - 0.5
    offset = tmp - offset
    output_shape = list(input.shape)
    output_shape[axes[0]] = oy
    output_shape[axes[1]] = ox
    output_shape = tuple(output_shape)
    output, return_value = _ni_support._get_output(output, input,
                                                   shape=output_shape)
    if input.ndim <= 2:
        affine_transform(input, matrix, offset, output_shape, output,
                         order, mode, cval, prefilter)
    else:
        # More than two dimensions: apply the 2-D rotation to each plane
        # spanned by the rotation axes, stepping through the remaining
        # axes odometer-style.
        coordinates = []
        size = numpy.product(input.shape,axis=0)
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
        for ii in range(input.ndim):
            if ii not in axes:
                coordinates.append(0)
            else:
                coordinates.append(slice(None, None, None))
        # NOTE: relies on Python 2 semantics where range() returns a list.
        iter_axes = range(input.ndim)
        iter_axes.reverse()
        iter_axes.remove(axes[0])
        iter_axes.remove(axes[1])
        os = (output_shape[axes[0]], output_shape[axes[1]])
        for ii in range(size):
            ia = input[tuple(coordinates)]
            oa = output[tuple(coordinates)]
            affine_transform(ia, matrix, offset, os, oa, order, mode,
                             cval, prefilter)
            # Advance to the next plane index (odometer increment).
            for jj in iter_axes:
                if coordinates[jj] < input.shape[jj] - 1:
                    coordinates[jj] += 1
                    break
                else:
                    coordinates[jj] = 0
    return return_value
|
tsdmgz/ansible | refs/heads/devel | lib/ansible/modules/cloud/centurylink/clc_server_snapshot.py | 56 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_server_snapshot
short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
description:
- An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
version_added: "2.0"
options:
server_ids:
description:
- The list of CLC server Ids.
required: True
expiration_days:
description:
- The number of days to keep the server snapshot before it expires.
default: 7
required: False
state:
description:
- The state to ensure that the provided resources are in.
default: 'present'
required: False
choices: ['present', 'absent', 'restore']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Create server snapshot
clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
expiration_days: 10
wait: True
state: present
- name: Restore server snapshot
clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
wait: True
state: restore
- name: Delete server snapshot
clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
wait: True
state: absent
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcSnapshot:
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
p = self.module.params
server_ids = p['server_ids']
expiration_days = p['expiration_days']
state = p['state']
request_list = []
changed = False
changed_servers = []
self._set_clc_credentials_from_env()
if state == 'present':
changed, request_list, changed_servers = self.ensure_server_snapshot_present(
server_ids=server_ids,
expiration_days=expiration_days)
elif state == 'absent':
changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
server_ids=server_ids)
elif state == 'restore':
changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
server_ids=server_ids)
self._wait_for_requests_to_complete(request_list)
return self.module.exit_json(
changed=changed,
server_ids=changed_servers)
def ensure_server_snapshot_present(self, server_ids, expiration_days):
"""
Ensures the given set of server_ids have the snapshots created
:param server_ids: The list of server_ids to create the snapshot
:param expiration_days: The number of days to keep the snapshot
:return: (changed, request_list, changed_servers)
changed: A flag indicating whether any change was made
request_list: the list of clc request objects from CLC API call
changed_servers: The list of servers ids that are modified
"""
request_list = []
changed = False
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.GetSnapshots()) == 0]
for server in servers_to_change:
changed = True
if not self.module.check_mode:
request = self._create_server_snapshot(server, expiration_days)
request_list.append(request)
changed_servers = [
server.id for server in servers_to_change if server.id]
return changed, request_list, changed_servers
def _create_server_snapshot(self, server, expiration_days):
"""
Create the snapshot for the CLC server
:param server: the CLC server object
:param expiration_days: The number of days to keep the snapshot
:return: the create request object from CLC API Call
"""
result = None
try:
result = server.CreateSnapshot(
delete_existing=True,
expiration_days=expiration_days)
except CLCException as ex:
self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def ensure_server_snapshot_absent(self, server_ids):
"""
Ensures the given set of server_ids have the snapshots removed
:param server_ids: The list of server_ids to delete the snapshot
:return: (changed, request_list, changed_servers)
changed: A flag indicating whether any change was made
request_list: the list of clc request objects from CLC API call
changed_servers: The list of servers ids that are modified
"""
request_list = []
changed = False
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.GetSnapshots()) > 0]
for server in servers_to_change:
changed = True
if not self.module.check_mode:
request = self._delete_server_snapshot(server)
request_list.append(request)
changed_servers = [
server.id for server in servers_to_change if server.id]
return changed, request_list, changed_servers
def _delete_server_snapshot(self, server):
"""
Delete snapshot for the CLC server
:param server: the CLC server object
:return: the delete snapshot request object from CLC API
"""
result = None
try:
result = server.DeleteSnapshot()
except CLCException as ex:
self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def ensure_server_snapshot_restore(self, server_ids):
"""
Ensures the given set of server_ids have the snapshots restored
:param server_ids: The list of server_ids to delete the snapshot
:return: (changed, request_list, changed_servers)
changed: A flag indicating whether any change was made
request_list: the list of clc request objects from CLC API call
changed_servers: The list of servers ids that are modified
"""
request_list = []
changed = False
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.GetSnapshots()) > 0]
for server in servers_to_change:
changed = True
if not self.module.check_mode:
request = self._restore_server_snapshot(server)
request_list.append(request)
changed_servers = [
server.id for server in servers_to_change if server.id]
return changed, request_list, changed_servers
def _restore_server_snapshot(self, server):
"""
Restore snapshot for the CLC server
:param server: the CLC server object
:return: the restore snapshot request object from CLC API
"""
result = None
try:
result = server.RestoreSnapshot()
except CLCException as ex:
self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process server snapshot request')
@staticmethod
def define_argument_spec():
"""
This function defines the dictionary object required for
package module
:return: the package dictionary object
"""
argument_spec = dict(
server_ids=dict(type='list', required=True),
expiration_days=dict(default=7),
wait=dict(default=True),
state=dict(
default='present',
choices=[
'present',
'absent',
'restore']),
)
return argument_spec
def _get_servers_from_clc(self, server_list, message):
    """
    Fetch the CLC server objects for the given server ids.
    :param server_list: the list of server ids to look up
    :param message: error message used if the CLC API call fails
    :return: the list of CLC server objects
    """
    try:
        servers = self.clc.v2.Servers(server_list).servers
    except CLCException as ex:
        return self.module.fail_json(msg=message + ': %s' % ex)
    return servers
def _set_clc_credentials_from_env(self):
    """
    Set the CLC credentials on the SDK from environment variables.

    Supports token auth (CLC_V2_API_TOKEN + CLC_ACCT_ALIAS), password
    auth (CLC_V2_API_USERNAME + CLC_V2_API_PASSWD) and the optional
    CLC_V2_API_URL endpoint override.
    :return: None
    """
    env = os.environ
    api_token = env.get('CLC_V2_API_TOKEN', False)
    api_username = env.get('CLC_V2_API_USERNAME', False)
    api_passwd = env.get('CLC_V2_API_PASSWD', False)
    account_alias = env.get('CLC_ACCT_ALIAS', False)
    endpoint = env.get('CLC_V2_API_URL', False)

    if endpoint:
        self.clc.defaults.ENDPOINT_URL_V2 = endpoint

    if api_token and account_alias:
        # Re-use a previously issued login token directly.
        self.clc._LOGIN_TOKEN_V2 = api_token
        self.clc._V2_ENABLED = True
        self.clc.ALIAS = account_alias
    elif api_username and api_passwd:
        self.clc.v2.SetCredentials(
            api_username=api_username,
            api_passwd=api_passwd)
    else:
        return self.module.fail_json(
            msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                "environment variables")
@staticmethod
def _set_user_agent(clc):
    """Tag outgoing CLC SDK requests with this module's identity so the
    API provider can attribute the traffic."""
    if not hasattr(clc, 'SetRequestsSession'):
        return
    agent_string = "ClcAnsibleModule/" + __version__
    session = requests.Session()
    session.headers.update({"Api-Client": agent_string})
    session.headers['User-Agent'] += " " + agent_string
    clc.SetRequestsSession(session)
def main():
    """
    Module entry point: build the AnsibleModule and process the request.
    :return: None
    """
    argument_spec = ClcSnapshot.define_argument_spec()
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    ClcSnapshot(module).process_request()
# Run the module only when executed directly (not when imported).
if __name__ == '__main__':
    main()
|
amitdeutsch/oppia | refs/heads/develop | scripts/experimental_deploy.py | 1 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an experimental deployment script for Oppia. It should only be used
for experimental testing, since it omits several safeguards: for example, it
does not run tests and it does not use a 'deploy_data' folder.
USE THIS SCRIPT AT YOUR OWN RISK!
Note:
1. Before running this script, you must install third-party dependencies by
running
bash scripts/start.sh
at least once.
2. This script should be run from the oppia root folder:
python scripts/experimental_deploy.py --app_name=[APP_NAME]
where [APP_NAME] is the name of your app. Note that the root folder MUST be
named 'oppia'.
"""
import argparse
import datetime
import os
import shutil
import subprocess
import common
# Command-line arguments for this deployment script.
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
    '--app_name', help='name of the app to deploy to', type=str)
PARSED_ARGS = _PARSER.parse_args()
if PARSED_ARGS.app_name:
    APP_NAME = PARSED_ARGS.app_name
    # Guard: the experimental script must never push to the production
    # or official test apps.
    if APP_NAME in ['oppiaserver', 'oppiatestserver']:
        raise Exception(
            'This script should not be used for updating %s. Please use '
            'scripts/deploy.py instead.' % APP_NAME)
else:
    raise Exception('No app name specified.')
CURRENT_DATETIME = datetime.datetime.utcnow()
# Release folder name, e.g. deploy-EXPERIMENT-myapp-20140101-120000;
# dots and colons in the app name are normalized to dashes.
RELEASE_DIR_NAME = 'deploy-EXPERIMENT-%s-%s' % (
    '-'.join('-'.join(APP_NAME.split('.')).split(':')),
    CURRENT_DATETIME.strftime('%Y%m%d-%H%M%S'))
RELEASE_DIR_PATH = os.path.join(os.getcwd(), '..', RELEASE_DIR_NAME)
APPCFG_PATH = os.path.join(
    '..', 'oppia_tools', 'google_appengine_1.9.19', 'google_appengine',
    'appcfg.py')
LOG_FILE_PATH = os.path.join('..', 'experimental_deploy.log')
THIRD_PARTY_DIR = os.path.join('.', 'third_party')
def preprocess_release():
    """Pre-processes release files.

    This function should be called from within RELEASE_DIR_NAME. Currently it
    does the following:

    (1) Changes the app name in app.yaml to APP_NAME.
    """
    # Change the app name in app.yaml. Use 'with' blocks so that both file
    # handles are closed (and the rewritten contents flushed to disk)
    # before the deployment step reads the file; the previous version
    # leaked both handles.
    with open('app.yaml', 'r') as app_yaml_file:
        content = app_yaml_file.read()
    os.remove('app.yaml')
    content = content.replace('oppiaserver', APP_NAME)
    with open('app.yaml', 'w+') as rewritten_file:
        rewritten_file.write(content)
# Check that the current directory is correct.
common.require_cwd_to_be_oppia()
# Record the commit being deployed so it can be written to the log below.
CURRENT_GIT_VERSION = subprocess.check_output(
    ['git', 'rev-parse', 'HEAD']).strip()
print ''
print 'Starting experimental deployment process.'
if not os.path.exists(THIRD_PARTY_DIR):
    raise Exception(
        'Could not find third_party directory at %s. Please run start.sh '
        'prior to running this script.' % THIRD_PARTY_DIR)
# Create a folder in which to save the release candidate.
print 'Ensuring that the release directory parent exists'
common.ensure_directory_exists(os.path.dirname(RELEASE_DIR_PATH))
# Copy files to the release directory. Omits the .git subfolder.
print 'Copying files to the release directory'
shutil.copytree(
    os.getcwd(), RELEASE_DIR_PATH, ignore=shutil.ignore_patterns('.git'))
# Change the current directory to the release candidate folder.
with common.CD(RELEASE_DIR_PATH):
    # Sanity-check that the CD context manager actually moved us.
    if not os.getcwd().endswith(RELEASE_DIR_NAME):
        raise Exception(
            'Invalid directory accessed during deployment: %s' % os.getcwd())
    print 'Changing directory to %s' % os.getcwd()
    print 'Preprocessing release...'
    preprocess_release()
    # Do a build; ensure there are no errors.
    print 'Building and minifying scripts...'
    subprocess.check_output(['python', 'scripts/build.py'])
    # Deploy to GAE.
    subprocess.check_output([APPCFG_PATH, 'update', '.', '--oauth2'])
    # Writing log entry.
    common.ensure_directory_exists(os.path.dirname(LOG_FILE_PATH))
    with open(LOG_FILE_PATH, 'a') as log_file:
        log_file.write(
            'Successfully completed experimental deployment to %s at %s '
            '(version %s)\n' % (
                APP_NAME, CURRENT_DATETIME.strftime('%Y-%m-%d %H:%M:%S'),
                CURRENT_GIT_VERSION))
print 'Returning to oppia/ root directory.'
print 'Done!'
|
twitter/pants | refs/heads/master | tests/python/pants_test/engine/legacy/test_build_ignore_integration.py | 1 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tempfile
from builtins import open
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class IgnorePatternsPantsIniIntegrationTest(PantsRunIntegrationTest):
    """Tests the functionality of the build_ignore_patterns option in pants.ini ."""

    @classmethod
    def use_pantsd_env_var(cls):
        """
        Some of the tests here expect to read the standard error after an intentional failure.
        However, when pantsd is enabled, these errors are logged to logs/exceptions.<pid>.log
        So stderr appears empty. (see #7320)
        """
        return False

    def test_build_ignore_patterns_pants_ini(self):
        # Helper: read a `minimize` output file back into a set of target
        # addresses, one per line.
        def output_to_list(output_filename):
            with open(output_filename, 'r') as results_file:
                return {line.rstrip() for line in results_file.readlines()}

        tempdir = tempfile.mkdtemp()
        tmp_output = os.path.join(tempdir, 'minimize-output1.txt')
        # First run: no ignore patterns, so the phrases targets must appear.
        run_result = self.run_pants(['minimize',
                                     'testprojects::',
                                     '--quiet',
                                     '--minimize-output-file={0}'.format(tmp_output)])
        self.assert_success(run_result)
        results = output_to_list(tmp_output)
        self.assertIn('testprojects/src/java/org/pantsbuild/testproject/phrases:ten-thousand',
                      results)
        self.assertIn('testprojects/src/java/org/pantsbuild/testproject/phrases:once-upon-a-time',
                      results)
        self.assertIn('testprojects/src/java/org/pantsbuild/testproject/phrases:lesser-of-two',
                      results)
        self.assertIn('testprojects/src/java/org/pantsbuild/testproject/phrases:there-was-a-duck',
                      results)

        tmp_output = os.path.join(tempdir, 'minimize-output2.txt')
        # Second run: the phrases directory is ignored via pants.ini, so
        # none of those targets may appear in the output.
        run_result = self.run_pants(['minimize',
                                     'testprojects::',
                                     '--quiet',
                                     '--minimize-output-file={0}'.format(tmp_output)],
                                    config={
                                        'DEFAULT': {
                                            'build_ignore': [
                                                'testprojects/src/java/org/pantsbuild/testproject/phrases'
                                            ]
                                        }
                                    })
        self.assert_success(run_result)
        results = output_to_list(tmp_output)
        self.assertNotIn('testprojects/src/java/org/pantsbuild/testproject/phrases:ten-thousand',
                         results)
        self.assertNotIn('testprojects/src/java/org/pantsbuild/testproject/phrases:once-upon-a-time',
                         results)
        self.assertNotIn('testprojects/src/java/org/pantsbuild/testproject/phrases:lesser-of-two',
                         results)
        self.assertNotIn('testprojects/src/java/org/pantsbuild/testproject/phrases:there-was-a-duck',
                         results)

    def test_build_ignore_dependency(self):
        # Ignoring all of testprojects/src hides a depended-on BUILD file,
        # so the dependency walk must fail.
        run_result = self.run_pants(['-q',
                                     'dependencies',
                                     'testprojects/tests/python/pants::'],
                                    config={
                                        'DEFAULT': {
                                            'build_ignore': [
                                                'testprojects/src/'
                                            ]
                                        }
                                    })
        self.assert_failure(run_result)
        # Error message complains dependency dir has no BUILD files.
        self.assertIn('testprojects/src/thrift/org/pantsbuild/constants_only', run_result.stderr_data)

    def test_build_ignore_dependency_success(self):
        # Ignoring an unrelated directory must leave the dependency walk intact.
        run_result = self.run_pants(['-q',
                                     'dependencies',
                                     'testprojects/tests/python/pants::'],
                                    config={
                                        'DEFAULT': {
                                            'build_ignore': [
                                                'testprojects/src/antlr'
                                            ]
                                        }
                                    })
        self.assert_success(run_result)
        self.assertIn('testprojects/tests/python/pants/constants_only:constants_only', run_result.stdout_data)
|
Maccimo/intellij-community | refs/heads/master | python/testData/inspections/PyTypeCheckerInspection/NewTypeAsParameter.py | 19 | from typing import NewType
UserId = NewType("UserId", int)
def get_user(user: UserId) -> str:
pass
get_user(UserId(5))
get_user(<warning descr="Expected type 'UserId', got 'str' instead">"John"</warning>)
get_user(<warning descr="Expected type 'UserId', got 'int' instead">4</warning>) |
zatchgordon/webGL | refs/heads/master | utils/exporters/blender/addons/io_three/exporter/api/object.py | 124 | import math
import mathutils
import bpy
from bpy import data, context, types
from bpy_extras.io_utils import axis_conversion
from .. import constants, logger, utilities, exceptions
from .constants import (
MESH,
EMPTY,
ARMATURE,
LAMP,
SPOT,
SUN,
POINT,
HEMI,
AREA,
CAMERA,
PERSP,
ORTHO,
RENDER,
NO_SHADOW,
ZYX
)
# Blender doesn't seem to have a good way to link a mesh back to the
# objects that are instancing it (or it is obvious and it just hasn't
# been discovered yet). This manifest serves as a way to map a mesh
# node to the object nodes that are using it.
# Maps mesh node name -> list of Blender objects instancing that mesh.
_MESH_MAP = {}
def _object(func):
    """Decorator that resolves its first argument to a Blender object.

    The wrapped callable may receive either a ``types.Object`` instance
    or an object name; names are looked up in ``data.objects`` before
    delegating to the decorated function.
    """
    def wrapped(arg, *args, **kwargs):
        obj = arg if isinstance(arg, types.Object) else data.objects[arg]
        return func(obj, *args, **kwargs)
    return wrapped
def clear_mesh_map():
    """Clears the mesh map; required on initialization so that state from
    a previous export run does not leak into the next one."""
    _MESH_MAP.clear()
def assemblies(valid_types, options):
    """Yield the names of top-level (assembly) nodes to export.

    :param valid_types: node types that may be exported
    :param options: exporter options
    """
    logger.debug('object.assemblies(%s)', valid_types)
    for candidate in data.objects:
        parent = candidate.parent
        # rigged assets are parented under armature nodes; any other
        # parented object belongs to its parent's hierarchy instead
        if parent is not None:
            if parent.type != ARMATURE:
                continue
            logger.info('Has armature parent %s', candidate.name)
        if _valid_node(candidate, valid_types, options):
            yield candidate.name
@_object
def cast_shadow(obj):
    """Return whether the object casts shadows.

    :param obj: Blender object (or its name)
    :return: True/False for meshes and supported lamp types, None for
        lamp types whose shadows ThreeJS does not support; implicitly
        returns None for any other object type.
    """
    logger.debug('object.cast_shadow(%s)', obj)
    if obj.type == LAMP:
        if obj.data.type in (SPOT, SUN):
            ret = obj.data.shadow_method != NO_SHADOW
        else:
            logger.info('%s is a lamp but this lamp type does not '
                        'have supported shadows in ThreeJS', obj.name)
            ret = None
        return ret
    elif obj.type == MESH:
        mat = material(obj)
        # No material means no shadow-casting flag to read.
        if mat:
            return data.materials[mat].use_cast_shadows
        else:
            return False
@_object
def children(obj, valid_types):
    """Yield the names of obj's children whose type is exportable.

    :param obj: Blender object (or its name)
    :param valid_types: node types that may be exported
    """
    logger.debug('object.children(%s, %s)', obj, valid_types)
    for child in obj.children:
        if child.type not in valid_types:
            continue
        yield child.name
@_object
def material(obj):
    """Return the name of the object's first material slot, or None.

    :param obj: Blender object (or its name)
    """
    logger.debug('object.material(%s)', obj)
    try:
        slot = obj.material_slots[0]
    except IndexError:
        # No material slots at all.
        return None
    return slot.name
@_object
def mesh(obj, options):
    """Return the name of the mesh node associated with obj.

    Consults the module-level mesh manifest first; if the object has not
    been mapped yet, its mesh is extracted and registered.

    :param obj: Blender object (or its name)
    :param options: exporter options
    :return: mesh node name, or None if the object is not a mesh or its
        extracted mesh has no faces
    """
    logger.debug('object.mesh(%s, %s)', obj, options)
    if obj.type != MESH:
        return
    for mesh_, objects in _MESH_MAP.items():
        if obj in objects:
            return mesh_
    else:
        logger.debug('Could not map object, updating manifest')
        mesh_ = extract_mesh(obj, options)
        # Compare by value: 'is not 0' only worked via CPython's
        # small-integer caching and is not a correctness guarantee.
        if len(mesh_.tessfaces) != 0:
            manifest = _MESH_MAP.setdefault(mesh_.name, [])
            manifest.append(obj)
            mesh_name = mesh_.name
        else:
            # possibly just being used as a controller
            logger.info('Object %s has no faces', obj.name)
            mesh_name = None
    return mesh_name
@_object
def name(obj):
    """Return the name of the given object.

    :param obj: Blender object (or its name)
    """
    return obj.name
@_object
def node_type(obj):
    """Return the Three.js node type string for the given object.

    :param obj: Blender object (or its name)
    :raises exceptions.UnsupportedObjectType: if the object cannot be
        mapped to a Three.js node type
    """
    logger.debug('object.node_type(%s)', obj)
    # standard transformation nodes are inferred
    if obj.type == MESH:
        return constants.MESH.title()
    elif obj.type == EMPTY:
        return constants.OBJECT.title()

    dispatch = {
        LAMP: {
            POINT: constants.POINT_LIGHT,
            SUN: constants.DIRECTIONAL_LIGHT,
            SPOT: constants.SPOT_LIGHT,
            HEMI: constants.HEMISPHERE_LIGHT,
            AREA: constants.AREA_LIGHT,
        },
        CAMERA: {
            PERSP: constants.PERSPECTIVE_CAMERA,
            ORTHO: constants.ORTHOGRAPHIC_CAMERA
        }
    }

    try:
        return dispatch[obj.type][obj.data.type]
    except (AttributeError, KeyError):
        # KeyError: object/data type not in the dispatch tables (the
        # original code let this escape as a bare KeyError);
        # AttributeError: obj.data is missing. Both mean "unsupported".
        msg = 'Invalid type: %s' % obj.type
        raise exceptions.UnsupportedObjectType(msg)
def nodes(valid_types, options):
    """Yield the names of every exportable node in the scene.

    :param valid_types: node types that may be exported
    :param options: exporter options
    """
    for candidate in data.objects:
        if not _valid_node(candidate, valid_types, options):
            continue
        yield candidate.name
@_object
def position(obj, options):
    """Return the object's translation as an (x, y, z) tuple.

    :param obj: Blender object (or its name)
    :param options: exporter options
    """
    logger.debug('object.position(%s)', obj)
    translation = matrix(obj, options).to_translation()
    return (translation.x, translation.y, translation.z)
@_object
def receive_shadow(obj):
    """Return whether the object's material receives shadows.

    :param obj: Blender object (or its name)
    :return: True/False for meshes; implicitly None for other types.
    """
    if obj.type == MESH:
        mat = material(obj)
        # No material means no receive-shadow flag to read.
        if mat:
            return data.materials[mat].use_shadows
        else:
            return False
# Blender is Z-up while Three.js is Y-up; this matrix converts world
# transforms between the two conventions.
AXIS_CONVERSION = axis_conversion(to_forward='Z', to_up='Y').to_4x4()
@_object
def matrix(obj, options):
    """Return the world matrix to export for the given object.

    :param obj: Blender object (or its name)
    :param options: exporter options
    :return: a 4x4 ``mathutils.Matrix``
    """
    logger.debug('object.matrix(%s)', obj)
    if options.get(constants.HIERARCHY, False) and obj.parent:
        # In hierarchy mode, export the transform relative to the parent.
        parent_inverted = obj.parent.matrix_world.inverted(mathutils.Matrix())
        return parent_inverted * obj.matrix_world
    else:
        # Otherwise bake the Z-up -> Y-up axis conversion into the
        # world matrix.
        return AXIS_CONVERSION * obj.matrix_world
@_object
def rotation(obj, options):
    """Return the object's rotation as (x, y, z) Euler angles (ZYX order).

    :param obj: Blender object (or its name)
    :param options: exporter options
    """
    logger.debug('object.rotation(%s)', obj)
    euler = matrix(obj, options).to_euler(ZYX)
    return (euler.x, euler.y, euler.z)
@_object
def scale(obj, options):
    """Return the object's scale as an (x, y, z) tuple.

    :param obj: Blender object (or its name)
    :param options: exporter options
    """
    logger.debug('object.scale(%s)', obj)
    scale_vector = matrix(obj, options).to_scale()
    return (scale_vector.x, scale_vector.y, scale_vector.z)
@_object
def select(obj):
    """Select the given object in the Blender scene.

    :param obj: Blender object (or its name)
    """
    obj.select = True
@_object
def unselect(obj):
    """Deselect the given object in the Blender scene.

    :param obj: Blender object (or its name)
    """
    obj.select = False
@_object
def visible(obj):
    """Return whether the object is visible in the current scene.

    :param obj: Blender object (or its name)
    """
    logger.debug('object.visible(%s)', obj)
    return obj.is_visible(context.scene)
def extract_mesh(obj, options, recalculate=False):
    """Extract a uniquely-named, render-ready mesh from a Blender object.

    :param obj: Blender object to extract from
    :param options: exporter options
    :param recalculate: recalculate normals after modifiers
        (Default value = False)
    :return: the extracted Blender mesh node
    """
    logger.debug('object.extract_mesh(%s, %s)', obj, options)
    mesh_node = obj.to_mesh(context.scene, True, RENDER)

    # transfer the geometry type to the extracted mesh
    mesh_node.THREE_geometry_type = obj.data.THREE_geometry_type

    # now determine whether or not to export using the geometry type
    # set globally from the exporter's options or to use the local
    # override on the mesh node itself
    opt_buffer = options.get(constants.GEOMETRY_TYPE)
    opt_buffer = opt_buffer == constants.BUFFER_GEOMETRY
    prop_buffer = mesh_node.THREE_geometry_type == constants.BUFFER_GEOMETRY

    # if doing buffer geometry it is imperative to triangulate the mesh
    if opt_buffer or prop_buffer:
        original_mesh = obj.data
        # temporarily swap the extracted mesh in so the triangulate
        # modifier applies to it rather than the original data
        obj.data = mesh_node
        logger.debug('swapped %s for %s',
                     original_mesh.name,
                     mesh_node.name)
        obj.select = True
        bpy.context.scene.objects.active = obj
        logger.info('Applying triangulation to %s', obj.data.name)
        bpy.ops.object.modifier_add(type='TRIANGULATE')
        bpy.ops.object.modifier_apply(apply_as='DATA',
                                      modifier='Triangulate')
        obj.data = original_mesh
        obj.select = False

    # recalculate the normals to face outwards; this is usually
    # best after applying modifiers, especially for something
    # like the mirror
    if recalculate:
        logger.info('Recalculating normals')
        original_mesh = obj.data
        obj.data = mesh_node

        bpy.context.scene.objects.active = obj
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.normals_make_consistent()
        bpy.ops.object.editmode_toggle()

        obj.data = original_mesh

    if not options.get(constants.SCENE):
        # no scene: bake the world transform (with axis flip) into
        # the mesh itself
        xrot = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X')
        mesh_node.transform(xrot * obj.matrix_world)

    # now generate a unique name (value comparison: 'index is 0' relied
    # on CPython's small-integer caching)
    index = 0
    while True:
        if index == 0:
            mesh_name = '%sGeometry' % obj.data.name
        else:
            mesh_name = '%sGeometry.%d' % (obj.data.name, index)
        try:
            data.meshes[mesh_name]
            index += 1
        except KeyError:
            break
    mesh_node.name = mesh_name

    mesh_node.update(calc_tessface=True)
    mesh_node.calc_normals()
    mesh_node.calc_tessface()
    scale_ = options.get(constants.SCALE, 1)
    mesh_node.transform(mathutils.Matrix.Scale(scale_, 4))

    return mesh_node
def objects_using_mesh(mesh_node):
    """Return the objects instancing the given mesh node.

    :param mesh_node: the mesh node to look up
    :return: list of Blender objects, or None (with a warning) if the
        mesh is not in the manifest
    """
    logger.debug('object.objects_using_mesh(%s)', mesh_node)
    try:
        return _MESH_MAP[mesh_node.name]
    except KeyError:
        logger.warning('Could not find mesh mapping')
def prep_meshes(options):
    """Prep the mesh nodes. Preparation includes identifying:
        - nodes that are on visible layers
        - nodes that have export disabled
        - nodes that have modifiers that need to be applied

    :param options: exporter options
    """
    logger.debug('object.prep_meshes(%s)', options)
    # Maps shared mesh-data name -> objects instancing it (no modifiers).
    mapping = {}

    visible_layers = _visible_scene_layers()

    for obj in data.objects:
        if obj.type != MESH:
            continue

        # this is ideal for skipping controller or proxy nodes
        # that may apply to a Blender but not a 3js scene
        if not _on_visible_layer(obj, visible_layers):
            logger.info('%s is not on a visible layer', obj.name)
            continue

        # if someone really insists on a visible node not being exportable
        if not obj.THREE_export:
            logger.info('%s export is disabled', obj.name)
            continue

        # need to apply modifiers before moving on, and before
        # handling instancing. it is possible for 2 or more objects
        # to instance the same mesh but to not all use the same modifiers.
        # this logic identifies the object with modifiers and extracts
        # the mesh, making the mesh unique to this particular object
        if len(obj.modifiers):
            logger.info('%s has modifiers' % obj.name)
            mesh_node = extract_mesh(obj, options, recalculate=True)
            _MESH_MAP[mesh_node.name] = [obj]
            continue

        logger.info('adding mesh %s.%s to prep',
                    obj.name, obj.data.name)
        manifest = mapping.setdefault(obj.data.name, [])
        manifest.append(obj)

    # now associate the extracted mesh node with all the objects
    # that are instancing it
    for objects in mapping.values():
        mesh_node = extract_mesh(objects[0], options)
        _MESH_MAP[mesh_node.name] = objects
def extracted_meshes():
    """Return the names of all extracted mesh nodes.

    :return: list of mesh node names
    """
    logger.debug('object.extracted_meshes()')
    # list(dict.keys()) copies the keys directly; the original identity
    # comprehension added nothing.
    return list(_MESH_MAP.keys())
def _on_visible_layer(obj, visible_layers):
    """Return whether the object sits on at least one visible layer.

    :param obj: Blender object
    :param visible_layers: indices of the scene's visible layers
    """
    is_visible = any(
        enabled and index in visible_layers
        for index, enabled in enumerate(obj.layers))
    if not is_visible:
        logger.info('%s is on a hidden layer', obj.name)
    return is_visible
def _visible_scene_layers():
    """Return the indices of the scene's visible layers.

    :return: list of visible layer indices
    """
    return [index for index, enabled
            in enumerate(context.scene.layers) if enabled]
def _valid_node(obj, valid_types, options):
    """Return whether the object should be exported.

    :param obj: Blender object
    :param valid_types: node types that may be exported
    :param options: exporter options
    """
    if obj.type not in valid_types:
        return False

    # skip objects that are not on visible layers
    visible_layers = _visible_scene_layers()
    if not _on_visible_layer(obj, visible_layers):
        return False

    # objects without the custom property default to exportable
    try:
        export = obj.THREE_export
    except AttributeError:
        export = True
    if not export:
        return False

    mesh_node = mesh(obj, options)
    is_mesh = obj.type == MESH

    # skip objects that a mesh could not be resolved
    if is_mesh and not mesh_node:
        return False

    # secondary test; if a mesh node was resolved but no faces are
    # detected then bow out. Compare by value: 'is 0' only worked via
    # CPython's small-integer caching.
    if is_mesh:
        mesh_node = data.meshes[mesh_node]
        if len(mesh_node.tessfaces) == 0:
            return False

    # if we get this far assume that the mesh is valid
    return True
|
rhndg/openedx | refs/heads/master | common/djangoapps/student/admin.py | 20 | '''
django admin pages for courseware model
'''
from django import forms
from config_models.admin import ConfigurationModelAdmin
from django.contrib.auth.models import User
from student.models import UserProfile, UserTestGroup, CourseEnrollmentAllowed, DashboardConfiguration
from student.models import (
CourseEnrollment, Registration, PendingNameChange, CourseAccessRole, LinkedInAddToProfileConfiguration
)
from ratelimitbackend import admin
from student.roles import REGISTERED_ACCESS_ROLES
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
class CourseAccessRoleForm(forms.ModelForm):
    """Form for adding new Course Access Roles via the Django Admin Panel."""

    class Meta:
        model = CourseAccessRole

    # The admin enters an email instead of picking a user; clean_email
    # resolves it to a User instance.
    email = forms.EmailField(required=True)
    COURSE_ACCESS_ROLES = [(role_name, role_name) for role_name in REGISTERED_ACCESS_ROLES.keys()]
    role = forms.ChoiceField(choices=COURSE_ACCESS_ROLES)

    def clean_course_id(self):
        """
        Checking course-id format and course exists in module store.
        This field can be null.

        Returns the parsed CourseKey (not the raw string) on success.
        """
        if self.cleaned_data['course_id']:
            course_id = self.cleaned_data['course_id']
            try:
                course_key = CourseKey.from_string(course_id)
            except InvalidKeyError:
                raise forms.ValidationError(u"Invalid CourseID. Please check the format and re-try.")
            if not modulestore().has_course(course_key):
                raise forms.ValidationError(u"Cannot find course with id {} in the modulestore".format(course_id))
            return course_key
        return None

    def clean_org(self):
        """If org and course-id exists then Check organization name
        against the given course.
        """
        if self.cleaned_data.get('course_id') and self.cleaned_data['org']:
            org = self.cleaned_data['org']
            # clean_course_id already converted course_id to a CourseKey,
            # so .org is available here.
            org_name = self.cleaned_data.get('course_id').org
            if org.lower() != org_name.lower():
                raise forms.ValidationError(
                    u"Org name {} is not valid. Valid name is {}.".format(
                        org, org_name
                    )
                )
        return self.cleaned_data['org']

    def clean_email(self):
        """
        Checking user object against given email id.

        Returns the matching User instance (not the email string); it is
        attached to the model in CourseAccessRoleAdmin.save_model.
        """
        email = self.cleaned_data['email']
        try:
            user = User.objects.get(email=email)
        # NOTE(review): broad except also swallows MultipleObjectsReturned
        # and DB errors, reporting them all as "does not exist" — confirm
        # whether narrowing to User.DoesNotExist is safe here.
        except Exception:
            raise forms.ValidationError(
                u"Email does not exist. Could not find {email}. Please re-enter email address".format(
                    email=email
                )
            )
        return user

    def clean(self):
        """
        Checking the course already exists in db.
        """
        cleaned_data = super(CourseAccessRoleForm, self).clean()
        # Only run the duplicate check when all per-field validation passed.
        if not self.errors:
            if CourseAccessRole.objects.filter(
                    user=cleaned_data.get("email"),
                    org=cleaned_data.get("org"),
                    course_id=cleaned_data.get("course_id"),
                    role=cleaned_data.get("role")
            ).exists():
                raise forms.ValidationError("Duplicate Record.")
        return cleaned_data
class CourseAccessRoleAdmin(admin.ModelAdmin):
    """Admin panel for the Course Access Role. """
    form = CourseAccessRoleForm
    raw_id_fields = ("user",)
    # The user is resolved from the form's 'email' field in save_model,
    # so the raw user field is hidden from the form.
    exclude = ("user",)

    fieldsets = (
        (None, {
            'fields': ('email', 'course_id', 'org', 'role',)
        }),
    )

    list_display = (
        'id', 'user', 'org', 'course_id', 'role',
    )

    search_fields = (
        'id', 'user__username', 'user__email', 'org', 'course_id', 'role',
    )

    def save_model(self, request, obj, form, change):
        # clean_email returns a User instance (not a string); attach it
        # as the row's user before saving.
        obj.user = form.cleaned_data['email']
        super(CourseAccessRoleAdmin, self).save_model(request, obj, form, change)
class LinkedInAddToProfileConfigurationAdmin(admin.ModelAdmin):
    """Admin interface for the LinkedIn Add to Profile configuration. """

    class Meta:
        model = LinkedInAddToProfileConfiguration

    # Exclude deprecated fields
    exclude = ('dashboard_tracking_code',)
# Register the student app models with the (rate-limited) admin site.
admin.site.register(UserProfile)
admin.site.register(UserTestGroup)
admin.site.register(CourseEnrollment)
admin.site.register(CourseEnrollmentAllowed)
admin.site.register(Registration)
admin.site.register(PendingNameChange)
admin.site.register(CourseAccessRole, CourseAccessRoleAdmin)
admin.site.register(DashboardConfiguration, ConfigurationModelAdmin)
admin.site.register(LinkedInAddToProfileConfiguration, LinkedInAddToProfileConfigurationAdmin)
|
aricaldeira/pyxmlsec | refs/heads/master | examples/encrypt1.py | 1 | #!/usr/bin/env python
#
# $Id: encrypt1.py 363 2006-01-01 18:03:07Z valos $
#
# PyXMLSec example: Encrypting data using a template file.
#
# Encrypts binary data using a template file and a DES key from a binary file
#
# Usage:
# ./encrypt1.py <xml-tmpl> <des-key-file>
#
# Example:
# ./encrypt1.py encrypt1-tmpl.xml deskey.bin > encrypt1-res.xml
#
# The result could be decrypted with decrypt1 example:
# ./decrypt1.py encrypt1-res.xml deskey.bin
#
# This is free software; see COPYING file in the source
# distribution for preciese wording.
#
# Copyright (C) 2003-2004 Valery Febvre <vfebvre@easter-eggs.com>
#
import sys
sys.path.insert(0, '../')
import libxml2
import xmlsec
def main():
    """Parse command-line arguments, initialize the XML/crypto stacks,
    run the encryption example and shut everything down again in
    reverse order. Exits the process with encrypt_file's result."""
    secret_data = "Big secret"

    assert(sys.argv)
    if len(sys.argv) < 3:
        print "Error: wrong number of arguments."
        print "Usage: %s <xml-tmpl> <des-key-file>" % sys.argv[0]
        return sys.exit(1)

    # Init libxml library
    libxml2.initParser()
    libxml2.substituteEntitiesDefault(1)

    # Init xmlsec library
    if xmlsec.init() < 0:
        print "Error: xmlsec initialization failed."
        return sys.exit(-1)

    # Check loaded library version
    if xmlsec.checkVersion() != 1:
        print "Error: loaded xmlsec library version is not compatible.\n"
        sys.exit(-1)

    # Init crypto library
    # NOTE(review): failures below only warn and continue — presumably
    # deliberate for this example; confirm before reusing in production.
    if xmlsec.cryptoAppInit(None) < 0:
        print "Error: crypto initialization failed."

    # Init xmlsec-crypto library
    if xmlsec.cryptoInit() < 0:
        print "Error: xmlsec-crypto initialization failed."

    res = encrypt_file(sys.argv[1], sys.argv[2], secret_data, len(secret_data))

    # Shutdown xmlsec-crypto library
    xmlsec.cryptoShutdown()
    # Shutdown crypto library
    xmlsec.cryptoAppShutdown()
    # Shutdown xmlsec library
    xmlsec.shutdown()
    # Shutdown LibXML2
    libxml2.cleanupParser()
    sys.exit(res)
# Encrypts binary #data using template from tmpl_file and DES key from key_file.
# Returns 1 on success or a negative value if an error occurs (the original
# comment claimed 0 on success, but the success path passes res=1 to cleanup).
def encrypt_file(tmpl_file, key_file, data, dataSize):
    """Encrypt *data* using the XML template in *tmpl_file* and the DES
    key stored in binary form in *key_file*; dumps the result to stdout.
    All exit paths route through cleanup() so the document and context
    are always released."""
    assert(tmpl_file)
    assert(key_file)
    assert(data)

    # Load template
    doc = libxml2.parseFile(tmpl_file)
    if doc is None or doc.getRootElement() is None:
        print "Error: unable to parse file \"%s\"" % tmpl_file
        return cleanup(doc)

    # Find start node
    node = xmlsec.findNode(doc.getRootElement(), xmlsec.NodeEncryptedData,
                           xmlsec.EncNs)
    if node is None:
        print "Error: start node not found in \"%s\"" % tmpl_file
        return cleanup(doc)

    # Create encryption context, we don't need keys manager in this example
    enc_ctx = xmlsec.EncCtx(None)
    if enc_ctx is None:
        print "Error: failed to create encryption context"
        return cleanup(doc)

    # Load DES key, assuming that there is no password
    key = xmlsec.keyReadBinaryFile(xmlsec.keyDataDesId(), key_file)
    if key is None:
        print "Error failed to load DES key from binary file \"%s\"" % key_file
        return cleanup(doc, enc_ctx)

    # Set key name to the file name, this is just an example!
    if key.setName(key_file) < 0:
        print "Error: failed to set key name for key from \"%s\"" % key_file
        return cleanup(doc, enc_ctx)

    enc_ctx.encKey = key

    # Encrypt the data
    if enc_ctx.binaryEncrypt(node, data, dataSize) < 0:
        print "Error: encryption failed"
        return cleanup(doc, enc_ctx)

    # Write the encrypted document to stdout
    doc.dump("-")

    # Success
    return cleanup(doc, enc_ctx, 1)
def cleanup(doc=None, enc_ctx=None, res=-1):
    """Release the encryption context and XML document, if given.

    The context is destroyed before the document is freed, matching the
    acquisition order used in encrypt_file.

    :param doc: libxml2 document to free, or None
    :param enc_ctx: xmlsec encryption context to destroy, or None
    :param res: result code to pass through (default -1, i.e. failure)
    :return: res, unchanged
    """
    for resource, release in ((enc_ctx, 'destroy'), (doc, 'freeDoc')):
        if resource is not None:
            getattr(resource, release)()
    return res
# Run the example only when executed as a script.
if __name__ == "__main__":
    main()
|
appsembler/edx-platform | refs/heads/appsembler/tahoe/master | openedx/core/djangolib/translation_utils.py | 13 | from django.utils.translation import ugettext as _, override
from django.utils.formats import dateformat, get_format
def translate_date(date, language, date_format='DATE_FORMAT'):
    """Return *date* formatted and translated for *language*.

    Both the layout of the date string and its words (e.g. the month
    name) are localized. If language is Spanish, the entire string is
    returned in lowercase; this works around a bug in the Spanish django
    month translations. See EDUCATOR-2328 for more details.

    For example:

        date = datetime.datetime(2017, 12, 23)
        date_in_spanish = translate_date(date, 'es')
        assert date_in_spanish == '23 de deciembre de 2017'
    """
    with override(language):
        locale_format = get_format(date_format, lang=language, use_l10n=True)
        result = dateformat.format(date, locale_format)
        if language and language.startswith('es'):
            return result.lower()
        return result
|
bala4901/odoo | refs/heads/master | addons/portal_project/project.py | 103 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-TODAY OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class portal_project(osv.Model):
    """Extend project.project with a portal visibility option.

    (The original docstring was a copy-paste about mail_mail/signin URLs
    and did not describe this model.)
    """
    _inherit = 'project.project'

    def _get_visibility_selection(self, cr, uid, context=None):
        """Override to insert the 'portal' option right after 'public'.

        :return: list of (value, label) tuples for the privacy selection
        """
        selection = super(portal_project, self)._get_visibility_selection(
            cr, uid, context=context)
        idx = [item[0] for item in selection].index('public')
        selection.insert(
            (idx + 1),
            ('portal', 'Customer related project: visible through portal'))
        return selection
|
xaviercobain88/framework-python | refs/heads/master | doc/_themes/flask_theme_support.py | 2228 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    """Pygments style for the Flask docs theme (Tango-derived palette).

    Pure data: maps pygments token types to color/format strings; the
    short CSS class each token renders as is noted per line.
    """
    background_color = "#f8f8f8"
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text:                     "", # class:  ''
        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'

        Comment:                   "italic #8f5902",         # class: 'c'
        Comment.Preproc:           "noitalic",               # class: 'cp'

        Keyword:                   "bold #004461",           # class: 'k'
        Keyword.Constant:          "bold #004461",           # class: 'kc'
        Keyword.Declaration:       "bold #004461",           # class: 'kd'
        Keyword.Namespace:         "bold #004461",           # class: 'kn'
        Keyword.Pseudo:            "bold #004461",           # class: 'kp'
        Keyword.Reserved:          "bold #004461",           # class: 'kr'
        Keyword.Type:              "bold #004461",           # class: 'kt'

        Operator:                  "#582800",                # class: 'o'
        Operator.Word:             "bold #004461",           # class: 'ow' - like keywords

        Punctuation:               "bold #000000",           # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",                # class: 'n'
        Name.Attribute:            "#c4a000",                # class: 'na' - to be revised
        Name.Builtin:              "#004461",                # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",                # class: 'bp'
        Name.Class:                "#000000",                # class: 'nc' - to be revised
        Name.Constant:             "#000000",                # class: 'no' - to be revised
        Name.Decorator:            "#888",                   # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",                # class: 'ni'
        Name.Exception:            "bold #cc0000",           # class: 'ne'
        Name.Function:             "#000000",                # class: 'nf'
        Name.Property:             "#000000",                # class: 'py'
        Name.Label:                "#f57900",                # class: 'nl'
        Name.Namespace:            "#000000",                # class: 'nn' - to be revised
        Name.Other:                "#000000",                # class: 'nx'
        Name.Tag:                  "bold #004461",           # class: 'nt' - like a keyword
        Name.Variable:             "#000000",                # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",                # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",                # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",                # class: 'vi' - to be revised

        Number:                    "#990000",                # class: 'm'

        Literal:                   "#000000",                # class: 'l'
        Literal.Date:              "#000000",                # class: 'ld'

        String:                    "#4e9a06",                # class: 's'
        String.Backtick:           "#4e9a06",                # class: 'sb'
        String.Char:               "#4e9a06",                # class: 'sc'
        String.Doc:                "italic #8f5902",         # class: 'sd' - like a comment
        String.Double:             "#4e9a06",                # class: 's2'
        String.Escape:             "#4e9a06",                # class: 'se'
        String.Heredoc:            "#4e9a06",                # class: 'sh'
        String.Interpol:           "#4e9a06",                # class: 'si'
        String.Other:              "#4e9a06",                # class: 'sx'
        String.Regex:              "#4e9a06",                # class: 'sr'
        String.Single:             "#4e9a06",                # class: 's1'
        String.Symbol:             "#4e9a06",                # class: 'ss'

        Generic:                   "#000000",                # class: 'g'
        Generic.Deleted:           "#a40000",                # class: 'gd'
        Generic.Emph:              "italic #000000",         # class: 'ge'
        Generic.Error:             "#ef2929",                # class: 'gr'
        Generic.Heading:           "bold #000080",           # class: 'gh'
        Generic.Inserted:          "#00A000",                # class: 'gi'
        Generic.Output:            "#888",                   # class: 'go'
        Generic.Prompt:            "#745334",                # class: 'gp'
        Generic.Strong:            "bold #000000",           # class: 'gs'
        Generic.Subheading:        "bold #800080",           # class: 'gu'
        Generic.Traceback:         "bold #a40000",           # class: 'gt'
    }
|
davegoopot/parl_rube | refs/heads/master | scripts/twit.py | 1 | from twitter import Twitter
import ConfigParser as configparser
from twitter import OAuth
from time import sleep
import mcpi.minecraft as minecraft
def auth():
    """Build an authenticated Twitter client from the local api.config file."""
    cfg = configparser.ConfigParser()
    cfg.read("api.config")
    get = lambda key: cfg.get("api", key)
    credentials = OAuth(get("token"), get("token_secret"),
                        get("con_key"), get("con_secret"))
    return Twitter(auth=credentials)
def find_max_id(query, t):
    """Return the id of the most recent tweet matching *query*."""
    recent = t.search.tweets(q=query, result_type="recent")
    return recent["search_metadata"]["max_id"]
def connect_to_mc():
    """Open a connection to the Minecraft server running on this machine."""
    return minecraft.Minecraft.create("localhost")
def set_minecraft_block():
    """Place a block of type 1 (stone) at the world origin."""
    connect_to_mc().setBlock(0, 0, 0, 1)
def watch_for_tweet(query, t, max_id):
    """Poll every 5s until a tweet newer than *max_id* matches *query*.

    When one arrives, print its text and place a Minecraft block.
    """
    found = None
    while found is None:
        batch = t.search.tweets(q=query,
                                since_id=max_id,
                                result_type="recent")
        statuses = batch["statuses"]
        if statuses:
            found = statuses[0]
        else:
            sleep(5)
    print(found["text"])
    set_minecraft_block()
if __name__ == "__main__":
    # Wire everything together: authenticate, find the newest matching
    # tweet, then block until an even newer one appears.
    query = "python"
    client = auth()
    newest = find_max_id(query, client)
    watch_for_tweet(query, client, newest)
    print(newest)
|
hfp/tensorflow-xsmm | refs/heads/master | tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py | 25 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConditionalTransformedDistribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.kernel_tests import transformed_distribution_test
from tensorflow.contrib.distributions.python.ops.bijectors.conditional_bijector import ConditionalBijector
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
ds = distributions
class _ChooseLocation(ConditionalBijector):
  """A Bijector which chooses between one of two location parameters."""

  def __init__(self, loc, name="ChooseLocation"):
    # `loc` holds the two candidate shift values, selected by z in {-1, +1}.
    self._graph_parents = []
    self._name = name
    with self._name_scope("init", values=[loc]):
      self._loc = ops.convert_to_tensor(loc, name="loc")
      super(_ChooseLocation, self).__init__(
          graph_parents=[self._loc],
          is_constant_jacobian=True,
          validate_args=False,
          forward_min_event_ndims=0,
          name=name)

  def _forward(self, x, z):
    # Shift each sample by the location its z entry selects.
    return x + self._gather_loc(z)

  def _inverse(self, x, z):
    return x - self._gather_loc(z)

  def _inverse_log_det_jacobian(self, x, event_ndims, z=None):
    # A pure shift has a unit Jacobian, so the log-determinant is zero.
    return 0.

  def _gather_loc(self, z):
    """Map z == -1 to self._loc[0] and z == +1 to self._loc[1]."""
    selector = math_ops.cast((1 + ops.convert_to_tensor(z)) / 2, dtypes.int32)
    return array_ops.gather(self._loc, selector)
class ConditionalTransformedDistributionTest(
    transformed_distribution_test.TransformedDistributionTest):
  """Re-runs the TransformedDistribution suite with the conditional class."""

  def _cls(self):
    return ds.ConditionalTransformedDistribution

  def testConditioning(self):
    # z selects the location: -1 -> loc[0] = -100, +1 -> loc[1] = +100,
    # so the sign of every sample must match its z entry.
    signs = [-1, +1, -1, -1, +1]
    with self.cached_session():
      conditioned = ds.ConditionalTransformedDistribution(
          distribution=ds.Normal(loc=0., scale=1.),
          bijector=_ChooseLocation(loc=[-100., 100.]))
      samples = conditioned.sample(5, bijector_kwargs={"z": signs}).eval()
      self.assertAllClose(np.sign(samples), signs)
class ConditionalScalarToMultiTest(
    transformed_distribution_test.ScalarToMultiTest):
  """Runs the scalar-to-multivariate suite against the conditional class."""

  def _cls(self):
    return ds.ConditionalTransformedDistribution


if __name__ == "__main__":
  test.main()
|
ovnicraft/server-tools | refs/heads/10.0 | sequence_check_digit/tests/__init__.py | 1 | # -*- coding: utf-8 -*-
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from . import test_check_digit
|
nck0405/ChennaiEden | refs/heads/master | modules/tests/helpers/report.py | 28 | # -*- coding: utf-8 -*-
"""
Tests for the report helper function in web2unittest.
"""
from gluon import current
from tests.web2unittest import SeleniumUnitTest
class ReportTestHelper(SeleniumUnitTest):
    """Exercise SeleniumUnitTest.report(), the pivot-report test helper."""

    # -------------------------------------------------------------------------
    def test_report_test_helper_normal(self):
        # A valid rows/cols combination: the (row-label, col-label, count)
        # triple must be found in the generated asset report.
        self.login(account="admin", nexturl="asset/asset/report")
        self.report(None, "Item", "Category", None,
                    ("Motorcyle - DT50MX - Yamaha", "Default > Vehicle", 5))

    # -------------------------------------------------------------------------
    def test_report_test_helper_invalid_report(self):
        # Asking for a report/group name that does not exist must raise
        # InvalidReportOrGroupException rather than fail silently.
        self.login(account="admin", nexturl="asset/asset/report")
        with self.assertRaises(self.InvalidReportOrGroupException):
            self.report(None, "Category", "Invalid Report", None,
                        ("Default > Equipment", "Timor-Leste Red Cross Society (CVTL)",
                         16))
# END =========================================================================
|
smmribeiro/intellij-community | refs/heads/master | python/testData/mover/multiCompound.py | 80 | for item in range(1,
3):
b = 2<caret>
|
rackerlabs/silverberg | refs/heads/master | silverberg/cluster.py | 1 | # Copyright 2012 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet.defer import DeferredList
from silverberg.client import CQLClient
from twisted.internet.error import ConnectError
class RoundRobinCassandraCluster(object):
    """
    Maintain one :py:class:`silverberg.client.CQLClient` per entry of
    `seed_endpoints`, all connected to `keyspace`. Each call to
    :py:func:`execute` picks the next client in round-robin order and,
    on connection failure, retries each remaining client once.

    :param seed_endpoints: A list of `IStreamClientEndpoint` providers to
        maintain client connections to.
    :param str keyspace: The cassandra keyspace to use.
    :param str user: Optional username.
    :param str password: Optional password.
    :param bool disconnect_on_cancel: Should TCP connection be disconnected
        on cancellation of running query? Defaults to False.
    """

    def __init__(self, seed_endpoints, keyspace, user=None, password=None,
                 disconnect_on_cancel=False):
        self._seed_clients = [
            CQLClient(endpoint, keyspace, user, password, disconnect_on_cancel)
            for endpoint in seed_endpoints]
        # Index of the client that served the most recent request.
        self._client_idx = 0

    def execute(self, *args, **kwargs):
        """
        Run a query on the next client in the rotation; on ConnectError the
        request is retried on every remaining client once before the failure
        is propagated. See :py:func:`silverberg.client.CQLClient.execute`.
        """
        count = len(self._seed_clients)
        first = (self._client_idx + 1) % count

        def _attempt(i):
            # Record the choice so the next execute() starts at i + 1.
            self._client_idx = i
            deferred = self._seed_clients[i].execute(*args, **kwargs)
            return deferred.addErrback(_connect_failed, i)

        def _connect_failed(failure, i):
            failure.trap(ConnectError)
            successor = (i + 1) % count
            if successor == first:
                # Wrapped all the way around: every host refused us.
                return failure
            return _attempt(successor)

        return _attempt(first)

    def disconnect(self):
        """
        Disconnect from the cassandra cluster. Not required before exiting,
        but useful if resources are constrained or for testing purposes.

        :return: a :class:`DeferredList` that fires with a list of None's
            when every client has disconnected.
        """
        return DeferredList([client.disconnect() for client in self._seed_clients])
|
eljost/pysisyphus | refs/heads/master | deprecated/optimizers/ONIOMOpt.py | 1 | import logging
import numpy as np
from pysisyphus.constants import AU2KJPERMOL
from pysisyphus.optimizers.Optimizer import Optimizer
from pysisyphus.optimizers import poly_fit
logger = logging.getLogger("optimizer")
class ONIOMOpt(Optimizer):
    """Prototype optimizer for multi-layer ONIOM geometries.

    Keeps per-layer state (previous conjugate-gradient search direction and
    the trial step length for the line search) and advances the geometry
    with either conjugate-gradient (cg_step) or steepest-descent (sd_step)
    moves, each backed by a polynomial line search. Explicitly flagged as
    work in progress by its author (see the print() in __init__).
    """

    def __init__(self, geometry, *args, micro_cycles=None, **kwargs):
        """Set up per-layer bookkeeping.

        micro_cycles: optional per-layer iteration counts. Defaults to one
        cycle per layer, with 5 cycles for the outermost layer.
        """
        print("The ONIOMOpt optimizer is not really ready yet!")
        super().__init__(geometry, *args, **kwargs)

        layers = self.geometry.layers
        print(f"found {len(layers)} layers: {layers}")
        if micro_cycles is None:
            micro_cycles = np.ones(len(layers), dtype=int)
            try:
                micro_cycles[0] = 5
            except IndexError:
                # No layers at all: no micro-cycle schedule either.
                micro_cycles = None
        self.micro_cycles = micro_cycles
        self.log(f"Micro cycles: {self.micro_cycles}")

        self.calc = self.geometry.calculator
        # Atom indices belonging to each layer; with a single layer this is
        # simply every atom of the geometry.
        if len(layers) > 1:
            self.layer_indices = [self.calc.atom_inds_in_layer(i, exclude_inner=True)
                                  for i, _ in enumerate(layers)
            ]
        else:
            self.layer_indices = [[i for i, atom in enumerate(self.geometry.atoms)] ]

        # Conjugate gradient, previous search direction
        self.prev_directions = [None for layer in layers]
        # Initial step length for line search
        self.trial_lengths = [0.1 for layer in layers]

    def cg_step(self, atoms, coords, index, beta_formula="HS", full=False):
        """One conjugate-gradient step for layer `index`.

        full=True evaluates the whole ONIOM energy/forces at `coords`;
        otherwise only the given layer is evaluated (without parent
        correction). Returns the step vector; falls back to a capped
        steepest-descent step when three line-search attempts fail.
        """
        if full:
            res = self.geometry.get_energy_and_forces_at(coords)
            forces = res["forces"]
            energy = res["energy"]
        else:
            energy, forces = self.calc.calc_layer(atoms, coords, index,
                                                 parent_correction=False)

        def stat(forces):
            # Log max/rms of the forces, restricted to this layer's atoms
            # unless the full system was evaluated.
            f3d = forces.reshape(-1, 3)
            if not full:
                f3d = f3d[self.layer_indices[index]]
            # max_ = np.abs(forces).max()
            # rms_ = np.sqrt(np.mean(forces**2))
            max_ = np.abs(f3d).max()
            rms_ = np.sqrt(np.mean(f3d**2))
            self.log(f"\tStart: max={max_:.6f}, rms={rms_:.6f}")
        stat(forces)

        prev_grad = -forces
        prev_energy = energy

        # Direction of steepst descent in the first cycle
        prev_direction = self.prev_directions[index]
        if prev_direction is None:
            prev_direction = forces

        atom_indices = self.layer_indices[index]
        if not full:
            # NOTE(review): hard-coded atom indices below look like leftover
            # debugging for one specific system — confirm before relying on it.
            if atom_indices == [10, 11, 12, 13, 14]:
                atom_indices = [7] + atom_indices
            # Zero out direction components of atoms outside this layer.
            _ = np.zeros_like(prev_direction).reshape(-1, 3)
            _[atom_indices] = prev_direction.reshape(-1, 3)[atom_indices]
            prev_direction = _.flatten()
        trial_length = self.trial_lengths[index]

        norm = np.linalg.norm(prev_direction)
        # Up to three line-search attempts with doubling trial lengths.
        for i in range(3):
            self.log(f"Linesearch with trial step length {trial_length:.6f}")
            trial_step = trial_length * prev_direction / norm
            trial_coords = coords + trial_step
            if full:
                res = self.geometry.get_energy_and_forces_at(trial_coords)
                trial_forces = res["forces"]
                trial_energy = res["energy"]
            else:
                trial_energy, trial_forces = self.calc.calc_layer(atoms, trial_coords, index,
                                                                 parent_correction=False)
            ls_kwargs = {
                "cur_energy": trial_energy,
                "cur_grad": -trial_forces,
                "prev_energy": prev_energy,
                "prev_grad": prev_grad,
                "prev_step": trial_step,
                "cubic_max_x": 2.,
                "quartic_max_x": 4.,
            }
            ls_result = poly_fit.poly_line_search(**ls_kwargs)
            if ls_result[0] is not None:
                energy, grad, step = ls_result
                trial_length = np.linalg.norm(step)
                break
            else:
                trial_length *= 2
                self.log("Linesearch did not produced a result. Trying longer "
                         "trial step length.")
        else:
            # Line search never succeeded: reset CG state and take a
            # steepest-descent step capped at length 0.5.
            self.trial_lengths[index] = 0.1
            self.prev_directions[index] = forces
            step = forces
            norm = np.linalg.norm(step)
            if norm > 0.5:
                step = step/norm * 0.5
            self.log("Steepest descent FALLBACK")
            return step
            # ls_result = poly_fit.poly_line_search(**ls_kwargs)
            # raise Exception("Linesearchfailed")

        # Hestensen-Stiefel
        if beta_formula == "HS":
            beta = grad.dot(grad - prev_grad) / (grad - prev_grad).dot(prev_direction)
        # Polak-Ribiere
        elif beta_formula == "PR":
            beta = grad.dot(grad - prev_grad) / prev_grad.dot(prev_grad)
        else:
            raise Exception("Invalid 'beta_formula'. Use 'PR' or 'HS'!")
        # Restart (pure steepest descent) whenever beta would be negative.
        beta = max(0, beta)
        self.log(f"beta = {beta:.4f}")
        self.prev_directions[index] = -grad + beta*prev_direction
        self.trial_lengths[index] = trial_length

        return step

    def sd_step(self, atoms, coords, index, full=True):
        """One steepest-descent step for layer `index`, with line search.

        Mirrors cg_step but always follows the current forces instead of a
        conjugate direction.
        """
        if full:
            res = self.geometry.get_energy_and_forces_at(coords)
            forces = res["forces"]
            energy = res["energy"]
        else:
            energy, forces = self.calc.calc_layer(atoms, coords, index,
                                                 parent_correction=False)

        def stat(forces):
            # Log max/rms of the forces on this layer's atoms.
            f3d = forces.reshape(-1, 3)
            f3d = f3d[self.layer_indices[index]]
            # max_ = np.abs(forces).max()
            # rms_ = np.sqrt(np.mean(forces**2))
            max_ = np.abs(f3d).max()
            rms_ = np.sqrt(np.mean(f3d**2))
            self.log(f"\tStart: max={max_:.6f}, rms={rms_:.6f}")
        stat(forces)

        prev_grad = -forces
        prev_energy = energy

        # Steepest descent direction
        prev_direction = forces

        # atom_indices = self.layer_indices[index]
        # if atom_indices == [10, 11, 12, 13, 14]:
            # atom_indices = [7] + atom_indices
        # self.log(f"\tatom_indices={atom_indices}")
        # _ = np.zeros_like(prev_direction).reshape(-1, 3)
        # _[atom_indices] = prev_direction.reshape(-1, 3)[atom_indices]
        # prev_direction = _.flatten()
        trial_length = self.trial_lengths[index]

        norm = np.linalg.norm(prev_direction)
        for i in range(3):
            self.log(f"\tLinesearch with trial step length {trial_length:.6f}")
            trial_step = trial_length * prev_direction / norm
            trial_coords = coords + trial_step
            # NOTE(review): this evaluation is repeated in the full=True
            # branch right below — looks redundant; confirm before removing.
            res = self.geometry.get_energy_and_forces_at(trial_coords)
            if full:
                res = self.geometry.get_energy_and_forces_at(trial_coords)
                trial_forces = res["forces"]
                trial_energy = res["energy"]
            else:
                trial_forces = res["forces"]
                trial_energy = res["energy"]
            # NOTE(review): cg_step passes "cubic_max_x"/"quartic_max_x";
            # the key names differ here — confirm which spelling
            # poly_fit.poly_line_search actually expects.
            ls_kwargs = {
                "cur_energy": trial_energy,
                "cur_grad": -trial_forces,
                "prev_energy": prev_energy,
                "prev_grad": prev_grad,
                "prev_step": trial_step,
                "cubic_max": 2.,
                "quartic_max": 4.,
            }
            ls_result = poly_fit.poly_line_search(**ls_kwargs)
            if ls_result[0] is not None:
                energy, grad, step = ls_result
                trial_length = np.linalg.norm(step)
                break
            else:
                trial_length *= 2
                self.log("Linesearch did not produced a result. Trying longer "
                         "trial step length.")
        else:
            # Reset everything
            self.trial_lengths[index] = 0.1
            self.prev_directions[index] = forces
            step = forces
            self.log("Steepest descent FALLBACK")

        self.trial_lengths[index] = trial_length
        return step

    # NOTE(review): a commented-out variant of optimize() used to live here.
    # It ran per-layer micro cycles (relaxing the outer layer several times
    # via self.micro_cycles before stepping the inner layer). It was removed
    # as dead code; see VCS history if the micro-cycle scheme is revived.

    def optimize(self):
        """Produce the next full-system step (CG with Polak-Ribiere beta).

        Also logs the energy change relative to the previous cycle.
        """
        atoms = self.geometry.atoms
        coords = self.geometry.coords
        forces = self.geometry.forces
        energy = self.geometry.energy

        self.forces.append(forces)
        self.energies.append(energy)

        if self.cur_cycle > 0:
            self.log(f"Current energy={energy:.6f}")
            dE = energy - self.energies[-2]
            dE_str = "raised" if dE >= 0 else "lowered"
            dEkj = dE*AU2KJPERMOL
            self.log(f"Current energy: {energy:.6f}, energy {dE_str} "
                     f"by {dEkj:.2f} kJ mol⁻¹")
            if dE_str == "raised":
                print("Raised!")

        return self.cg_step(atoms, coords, 0, full=True, beta_formula="PR")
        # return self.sd_step(atoms, coords, 0, full=True)
|
grpc/grpc | refs/heads/master | tools/run_tests/lb_interop_tests/gen_build_yaml.py | 13 | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate JSON data for LB interop test scenarios."""
import json
import os
import yaml
# Accumulates every scenario dict produced by the generate_* helpers below;
# the list is dumped as YAML at the bottom of this file.
all_scenarios = []

# TODO(https://github.com/grpc/grpc-go/issues/2347): enable
# client_falls_back_because_no_backends_* scenarios for Java/Go.

# TODO(https://github.com/grpc/grpc-java/issues/4887): enable
# *short_stream* scenarios for Java.

# TODO(https://github.com/grpc/grpc-java/issues/4912): enable
# Java TLS tests involving TLS to the balancer.
def server_sec(transport_sec):
    """Map a channel security choice to (balancer, backend, fallback) security.

    'google_default_credentials' resolves to ALTS for the balancer/backends
    and TLS for the fallback; every other choice is used for all three.
    """
    special = {'google_default_credentials': ('alts', 'alts', 'tls')}
    return special.get(transport_sec, (transport_sec,) * 3)
def generate_no_balancer_because_lb_a_record_returns_nx_domain():
    """Scenarios: balancer DNS A record is NXDOMAIN; client must fall back."""
    all_configs = []
    for transport_sec in ('insecure', 'alts', 'tls',
                          'google_default_credentials'):
        # Only the fallback security matters; no balancer/backends exist.
        _, _, fallback_sec = server_sec(transport_sec)
        all_configs.append({
            'name': ('no_balancer_because_lb_a_record_returns_nx_domain_%s' %
                     transport_sec),
            'skip_langs': [],
            'transport_sec': transport_sec,
            'balancer_configs': [],
            'backend_configs': [],
            'fallback_configs': [{'transport_sec': fallback_sec}],
            'cause_no_error_no_data_for_balancer_a_record': False,
        })
    return all_configs


all_scenarios += generate_no_balancer_because_lb_a_record_returns_nx_domain()
def generate_no_balancer_because_lb_a_record_returns_no_data():
    """Scenarios: balancer A record resolves but carries no data."""
    all_configs = []
    for transport_sec in ('insecure', 'alts', 'tls',
                          'google_default_credentials'):
        _, _, fallback_sec = server_sec(transport_sec)
        all_configs.append({
            'name': ('no_balancer_because_lb_a_record_returns_no_data_%s' %
                     transport_sec),
            'skip_langs': [],
            'transport_sec': transport_sec,
            'balancer_configs': [],
            'backend_configs': [],
            'fallback_configs': [{'transport_sec': fallback_sec}],
            # This is the only knob distinguishing this family from the
            # NXDOMAIN scenarios above.
            'cause_no_error_no_data_for_balancer_a_record': True,
        })
    return all_configs


all_scenarios += generate_no_balancer_because_lb_a_record_returns_no_data()
def generate_client_referred_to_backend():
    """Happy path: one balancer refers the client to a single backend."""
    all_configs = []
    for short_stream in (True, False):
        for transport_sec in ('insecure', 'alts', 'tls',
                              'google_default_credentials'):
            balancer_sec, backend_sec, _ = server_sec(transport_sec)
            skip_langs = []
            if transport_sec == 'tls':
                skip_langs.append('java')  # grpc-java#4912
            if short_stream:
                skip_langs.append('java')  # grpc-java#4887
            all_configs.append({
                'name': ('client_referred_to_backend_%s_short_stream_%s' %
                         (transport_sec, short_stream)),
                'skip_langs': skip_langs,
                'transport_sec': transport_sec,
                'balancer_configs': [{
                    'transport_sec': balancer_sec,
                    'short_stream': short_stream,
                }],
                'backend_configs': [{'transport_sec': backend_sec}],
                'fallback_configs': [],
                'cause_no_error_no_data_for_balancer_a_record': False,
            })
    return all_configs


all_scenarios += generate_client_referred_to_backend()
def generate_client_referred_to_backend_fallback_broken():
    """Scenarios: balancer/backend work, but the fallback is unusable."""
    all_configs = []
    for short_stream in (True, False):
        for transport_sec in ('alts', 'tls', 'google_default_credentials'):
            balancer_sec, backend_sec, _ = server_sec(transport_sec)
            skip_langs = []
            if transport_sec == 'tls':
                skip_langs.append('java')  # grpc-java#4912
            if short_stream:
                skip_langs.append('java')  # grpc-java#4887
            all_configs.append({
                'name':
                    ('client_referred_to_backend_fallback_broken_%s_short_stream_%s'
                     % (transport_sec, short_stream)),
                'skip_langs': skip_langs,
                'transport_sec': transport_sec,
                'balancer_configs': [{
                    'transport_sec': balancer_sec,
                    'short_stream': short_stream,
                }],
                'backend_configs': [{'transport_sec': backend_sec}],
                # Fallback is deliberately plaintext while the client is
                # secure, so any fallback attempt must fail.
                'fallback_configs': [{'transport_sec': 'insecure'}],
                'cause_no_error_no_data_for_balancer_a_record': False,
            })
    return all_configs


all_scenarios += generate_client_referred_to_backend_fallback_broken()
def generate_client_referred_to_backend_multiple_backends():
    """Scenarios: the balancer refers the client to five backends."""
    all_configs = []
    for short_stream in (True, False):
        for transport_sec in ('insecure', 'alts', 'tls',
                              'google_default_credentials'):
            balancer_sec, backend_sec, _ = server_sec(transport_sec)
            skip_langs = []
            if transport_sec == 'tls':
                skip_langs.append('java')  # grpc-java#4912
            if short_stream:
                skip_langs.append('java')  # grpc-java#4887
            # Five *distinct* dicts (not one dict repeated) so yaml.dump
            # does not emit anchors/aliases for shared objects.
            backends = [{'transport_sec': backend_sec} for _ in range(5)]
            all_configs.append({
                'name':
                    ('client_referred_to_backend_multiple_backends_%s_short_stream_%s'
                     % (transport_sec, short_stream)),
                'skip_langs': skip_langs,
                'transport_sec': transport_sec,
                'balancer_configs': [{
                    'transport_sec': balancer_sec,
                    'short_stream': short_stream,
                }],
                'backend_configs': backends,
                'fallback_configs': [],
                'cause_no_error_no_data_for_balancer_a_record': False,
            })
    return all_configs


all_scenarios += generate_client_referred_to_backend_multiple_backends()
def generate_client_falls_back_because_no_backends():
    """Scenarios: balancer answers but lists no backends; client falls back."""
    all_configs = []
    for short_stream in (True, False):
        for transport_sec in ('insecure', 'alts', 'tls',
                              'google_default_credentials'):
            balancer_sec, _, fallback_sec = server_sec(transport_sec)
            # See the TODOs at the top of this file for why Go and Java
            # are skipped here.
            skip_langs = ['go', 'java']
            if transport_sec == 'tls':
                skip_langs.append('java')
            if short_stream:
                skip_langs.append('java')
            all_configs.append({
                'name': ('client_falls_back_because_no_backends_%s_short_stream_%s'
                         % (transport_sec, short_stream)),
                'skip_langs': skip_langs,
                'transport_sec': transport_sec,
                'balancer_configs': [{
                    'transport_sec': balancer_sec,
                    'short_stream': short_stream,
                }],
                'backend_configs': [],
                'fallback_configs': [{'transport_sec': fallback_sec}],
                'cause_no_error_no_data_for_balancer_a_record': False,
            })
    return all_configs


all_scenarios += generate_client_falls_back_because_no_backends()
def generate_client_falls_back_because_balancer_connection_broken():
    """Scenarios: secure client cannot handshake with a plaintext balancer."""
    all_configs = []
    for transport_sec in ('alts', 'tls', 'google_default_credentials'):
        _, _, fallback_sec = server_sec(transport_sec)
        skip_langs = ['java'] if transport_sec == 'tls' else []
        all_configs.append({
            'name': ('client_falls_back_because_balancer_connection_broken_%s'
                     % transport_sec),
            'skip_langs': skip_langs,
            'transport_sec': transport_sec,
            # The balancer is intentionally insecure while the client
            # expects a secure channel, so the balancer link never works.
            'balancer_configs': [{
                'transport_sec': 'insecure',
                'short_stream': False,
            }],
            'backend_configs': [],
            'fallback_configs': [{'transport_sec': fallback_sec}],
            'cause_no_error_no_data_for_balancer_a_record': False,
        })
    return all_configs


all_scenarios += generate_client_falls_back_because_balancer_connection_broken()
def generate_client_referred_to_backend_multiple_balancers():
    """Scenarios: five equivalent balancers all refer to a single backend."""
    all_configs = []
    for short_stream in (True, False):
        for transport_sec in ('insecure', 'alts', 'tls',
                              'google_default_credentials'):
            balancer_sec, backend_sec, _ = server_sec(transport_sec)
            skip_langs = []
            if transport_sec == 'tls':
                skip_langs.append('java')  # grpc-java#4912
            if short_stream:
                skip_langs.append('java')  # grpc-java#4887
            # Five *distinct* dicts (not one dict repeated) so yaml.dump
            # does not emit anchors/aliases for shared objects.
            balancers = [{
                'transport_sec': balancer_sec,
                'short_stream': short_stream,
            } for _ in range(5)]
            all_configs.append({
                'name':
                    ('client_referred_to_backend_multiple_balancers_%s_short_stream_%s'
                     % (transport_sec, short_stream)),
                'skip_langs': skip_langs,
                'transport_sec': transport_sec,
                'balancer_configs': balancers,
                'backend_configs': [{'transport_sec': backend_sec}],
                'fallback_configs': [],
                'cause_no_error_no_data_for_balancer_a_record': False,
            })
    return all_configs


all_scenarios += generate_client_referred_to_backend_multiple_balancers()
# Emit the full scenario list as YAML on stdout; the build tooling
# captures and consumes it.
output = {'lb_interop_test_scenarios': all_scenarios}
print(yaml.dump(output))
|
jmanday/Herramientas-IV | refs/heads/master | CDE/tests/coreutils_pwd_test/testme.py | 2 | # Test to make sure the coreutils pwd program prints out the right thing
# even when we move the CDE package to another directory
# weird that pwd doesn't do the right thing when you move directories
# ... it seems to truncate the buffer to near the ACTUAL pwd size
# coreutils pwd doesn't actually use the getcwd syscall ... instead it
# does its own thang so we might be hosed
# http://www.google.com/codesearch/p?hl=en#g6W0qk4jBZE/src/bin/coreutils/src/pwd.c&q=pwd.c%20coreutils&sa=N&cd=1&ct=rc
import sys
sys.path.insert(0, '..')
from cde_test_common import *
def checker_func():
    """Assert that the 'pwd' binary was captured inside the CDE package."""
    packaged_pwd = CDE_ROOT_DIR + '/bin/pwd'
    assert os.path.isfile(packaged_pwd)


generic_test_runner(["pwd"], checker_func)
|
gylian/sickbeard | refs/heads/master | lib/subliminal/services/podnapisiweb.py | 8 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import DownloadFailedError
from ..language import Language, language_set
from ..subtitles import ResultSubtitle
from ..utils import get_keywords
from ..videos import Episode, Movie
from bs4 import BeautifulSoup
import guessit
import logging
import re
from subliminal.subtitles import get_subtitle_path
from sickbeard import db
from sickbeard import logger as glog
logger = logging.getLogger("subliminal")
class PodnapisiWeb(ServiceBase):
server_url = 'http://simple.podnapisi.net'
site_url = 'http://www.podnapisi.net'
api_based = True
user_agent = 'Subliminal/0.6'
videos = [Episode, Movie]
require_video = False
languages = language_set(['Albanian', 'Arabic', 'Spanish (Argentina)', 'Belarusian', 'Bosnian', 'Portuguese (Brazil)', 'Bulgarian', 'Catalan',
'Chinese', 'Croatian', 'Czech', 'Danish', 'Dutch', 'English', 'Estonian', 'Persian',
'Finnish', 'French', 'German', 'gre', 'Kalaallisut', 'Hebrew', 'Hindi', 'Hungarian',
'Icelandic', 'Indonesian', 'Irish', 'Italian', 'Japanese', 'Kazakh', 'Korean', 'Latvian',
'Lithuanian', 'Macedonian', 'Malay', 'Norwegian', 'Polish', 'Portuguese', 'Romanian',
'Russian', 'Serbian', 'Sinhala', 'Slovak', 'Slovenian', 'Spanish', 'Swedish', 'Thai',
'Turkish', 'Ukrainian', 'Vietnamese'])
language_map = {Language('Albanian'): 29, Language('Arabic'): 12, Language('Spanish (Argentina)'): 14, Language('Belarusian'): 50,
Language('Bosnian'): 10, Language('Portuguese (Brazil)'): 48, Language('Bulgarian'): 33, Language('Catalan'): 53,
Language('Chinese'): 17, Language('Croatian'): 38, Language('Czech'): 7, Language('Danish'): 24,
Language('Dutch'): 23, Language('English'): 2, Language('Estonian'): 20, Language('Persian'): 52,
Language('Finnish'): 31, Language('French'): 8, Language('German'): 5, Language('gre'): 16,
Language('Kalaallisut'): 57, Language('Hebrew'): 22, Language('Hindi'): 42, Language('Hungarian'): 15,
Language('Icelandic'): 6, Language('Indonesian'): 54, Language('Irish'): 49, Language('Italian'): 9,
Language('Japanese'): 11, Language('Kazakh'): 58, Language('Korean'): 4, Language('Latvian'): 21,
Language('Lithuanian'): 19, Language('Macedonian'): 35, Language('Malay'): 55,
Language('Norwegian'): 3, Language('Polish'): 26, Language('Portuguese'): 32, Language('Romanian'): 13,
Language('Russian'): 27, Language('Serbian'): 36, Language('Sinhala'): 56, Language('Slovak'): 37,
Language('Slovenian'): 1, Language('Spanish'): 28, Language('Swedish'): 25, Language('Thai'): 44,
Language('Turkish'): 30, Language('Ukrainian'): 46, Language('Vietnamese'): 51,
29: Language('Albanian'), 12: Language('Arabic'), 14: Language('Spanish (Argentina)'), 50: Language('Belarusian'),
10: Language('Bosnian'), 48: Language('Portuguese (Brazil)'), 33: Language('Bulgarian'), 53: Language('Catalan'),
17: Language('Chinese'), 38: Language('Croatian'), 7: Language('Czech'), 24: Language('Danish'),
23: Language('Dutch'), 2: Language('English'), 20: Language('Estonian'), 52: Language('Persian'),
31: Language('Finnish'), 8: Language('French'), 5: Language('German'), 16: Language('gre'),
57: Language('Kalaallisut'), 22: Language('Hebrew'), 42: Language('Hindi'), 15: Language('Hungarian'),
6: Language('Icelandic'), 54: Language('Indonesian'), 49: Language('Irish'), 9: Language('Italian'),
11: Language('Japanese'), 58: Language('Kazakh'), 4: Language('Korean'), 21: Language('Latvian'),
19: Language('Lithuanian'), 35: Language('Macedonian'), 55: Language('Malay'), 40: Language('Chinese'),
3: Language('Norwegian'), 26: Language('Polish'), 32: Language('Portuguese'), 13: Language('Romanian'),
27: Language('Russian'), 36: Language('Serbian'), 47: Language('Serbian'), 56: Language('Sinhala'),
37: Language('Slovak'), 1: Language('Slovenian'), 28: Language('Spanish'), 25: Language('Swedish'),
44: Language('Thai'), 30: Language('Turkish'), 46: Language('Ukrainian'), Language('Vietnamese'): 51}
def list_checked(self, video, languages):
    """Dispatch a subtitle query for a checked *video*.

    Movies are searched by title/year, episodes by series/season/episode;
    any other video type yields ``None``.
    """
    source = video.path or video.release
    if isinstance(video, Movie):
        return self.query(source, languages, video.title, year=video.year,
                          keywords=get_keywords(video.guess))
    if isinstance(video, Episode):
        return self.query(source, languages, video.series,
                          season=video.season, episode=video.episode,
                          keywords=get_keywords(video.guess))
def query(self, filepath, languages, title, season=None, episode=None, year=None, keywords=None):
    """Search Podnapisi for subtitles and return a list of ResultSubtitle.

    The show is first looked up in the local SickBeard databases so the
    search can use scene season/episode numbering and a custom scene name
    when one exists; the Podnapisi XML search API is then queried and each
    returned ``<subtitle>`` element is converted into a ``ResultSubtitle``.

    Returns an empty list when the HTTP request fails.
    """
    myDB = db.DBConnection()
    myDBcache = db.DBConnection("cache.db")
    # Map the pretty title onto the indexer id; if the show is unknown
    # locally we simply search with the title as given.
    sql_show_id = myDB.select(
        "SELECT tvdb_id, show_name FROM tv_shows WHERE show_name LIKE ?",
        ['%' + title + '%'])
    if sql_show_id and sql_show_id[0][0]:
        real_name = sql_show_id[0][1]
        # Prefer scene numbering when this episode has one.
        sql_scene = myDB.select(
            "SELECT scene_season, scene_episode FROM tv_episodes WHERE showid = ? and season = ? and episode = ?",
            [sql_show_id[0][0], season, episode])
        if sql_scene and sql_scene[0][0]:
            season = sql_scene[0][0]
            episode = sql_scene[0][1]
        # A scene-exception name usually matches release names better than
        # the canonical show name.
        sql_custom_names = myDBcache.select(
            "SELECT show_name FROM scene_exceptions WHERE tvdb_id = ? and show_name<> ? ORDER BY exception_id asc",
            [sql_show_id[0][0], real_name])
        if sql_custom_names:
            title = sql_custom_names[0][0]
    glog.log(u'Searching Subtitles on Podnapisiweb with title : %s season : %s episode : %s' % (title, season, episode))
    # Build the requested-language code list explicitly.  The original code
    # later read the leaked comprehension variable `l`; `language` below
    # reproduces that value (the last language iterated).
    language = None
    language_codes = []
    for language in languages:
        language_codes.append(str(self.get_code(language)))
    params = {'sXML': 1, 'sK': title, 'sJ': ','.join(language_codes)}
    if season is not None:
        params['sTS'] = season
    if episode is not None:
        params['sTE'] = episode
    if year is not None:
        params['sY'] = year
    if keywords is not None:
        params['sR'] = keywords
    r = self.session.get(self.server_url + '/ppodnapisi/search', params=params)
    if r.status_code != 200:
        logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
        return []
    subtitles = []
    soup = BeautifulSoup(r.content, self.required_features)
    for sub in soup('subtitle'):
        if 'n' in sub.flags:
            # The 'n' flag marks hearing-impaired subtitles.
            logger.debug(u'Skipping hearing impaired')
            continue
        # NOTE(review): every result is tagged with the *last* requested
        # language (preserved from the original, which read the leaked
        # loop variable `l`).  Podnapisi reports the actual language per
        # subtitle, so this looks wrong for multi-language searches --
        # confirm before changing.
        confidence = float(sub.rating.text) / 5.0
        sub_keywords = set()
        for release in sub.release.text.split():
            sub_keywords |= get_keywords(guessit.guess_file_info(release + '.srt', 'autodetect'))
        sub_path = get_subtitle_path(filepath, language, self.config.multi)
        subtitle = ResultSubtitle(sub_path, language, self.__class__.__name__.lower(),
                                  sub.url.text, confidence=confidence, keywords=sub_keywords)
        subtitles.append(subtitle)
    return subtitles
def download(self, subtitle):
    """Fetch the subtitle's page, follow its download link and save the zip."""
    response = self.session.get(subtitle.link)
    if response.status_code != 200:
        raise DownloadFailedError()
    page = BeautifulSoup(response.content)
    # The actual archive sits behind the first anchor whose href contains
    # "download".
    download_href = page.find('a', href=re.compile('download'))['href']
    self.download_zip_file(self.server_url + download_href, subtitle.path)
    return subtitle
# Alias picked up by the service loader: this module's service class.
Service = PodnapisiWeb
|
akirse/MailService | refs/heads/master | helloworld.py | 1 | from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class MainPage(webapp.RequestHandler):
    """Handler for '/': writes a short greeting."""

    def get(self):
        out = self.response.out
        self.response.headers['Content-Type'] = 'text/plain'
        # NOTE(review): HTML markup is emitted under a text/plain content
        # type, so browsers show the tags literally -- confirm intent.
        out.write('Hello, webapp World!')
        out.write('<p>Yeah 5</p>')
# WSGI application: maps the root URL to MainPage; debug=True shows
# tracebacks in the browser response.
application = webapp.WSGIApplication([('/', MainPage)], debug=True)
def main():
    # Serve `application` through App Engine's CGI adapter.
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
|
kobejean/tensorflow | refs/heads/master | tensorflow/contrib/metrics/python/ops/metric_ops.py | 5 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains metric-computing operations on streamed tensors.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.util.deprecation import deprecated
# Epsilon constant used to represent extremely small quantity.
_EPSILON = 1e-7
def _safe_div(numerator, denominator, name):
  """Divide two values, yielding 0 wherever the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor` with the same dtype as `numerator`.
    name: Name for the returned op.

  Returns:
    `numerator / denominator` where `denominator > 0`, otherwise 0.
  """
  is_positive = math_ops.greater(denominator, 0)
  quotient = math_ops.truediv(numerator, denominator)
  return array_ops.where(is_positive, quotient, 0, name=name)
@deprecated(None, 'Please switch to tf.metrics.true_positives. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_true_positives(predictions,
                             labels,
                             weights=None,
                             metrics_collections=None,
                             updates_collections=None,
                             name=None):
  """Sum the weights of true_positives.

  Deprecated thin wrapper around `tf.metrics.true_positives` (which takes
  `labels` before `predictions`).

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    predictions: Predicted values of arbitrary shape; cast to `bool`.
    labels: Ground-truth values with the same shape as `predictions`;
      cast to `bool`.
    weights: Optional `Tensor` broadcastable to `labels`.
    metrics_collections: Optional list of collections for the metric value.
    updates_collections: Optional list of collections for the update op.
    name: An optional variable_scope name.

  Returns:
    value_tensor: `Tensor` holding the current metric value.
    update_op: Op that accumulates statistics from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` has an incompatible shape, or if the collection arguments
      are not lists or tuples.
  """
  return metrics.true_positives(
      labels=labels,
      predictions=predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.true_negatives. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_true_negatives(predictions,
                             labels,
                             weights=None,
                             metrics_collections=None,
                             updates_collections=None,
                             name=None):
  """Sum the weights of true_negatives.

  Deprecated thin wrapper around `tf.metrics.true_negatives` (which takes
  `labels` before `predictions`).

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    predictions: Predicted values of arbitrary shape; cast to `bool`.
    labels: Ground-truth values with the same shape as `predictions`;
      cast to `bool`.
    weights: Optional `Tensor` broadcastable to `labels`.
    metrics_collections: Optional list of collections for the metric value.
    updates_collections: Optional list of collections for the update op.
    name: An optional variable_scope name.

  Returns:
    value_tensor: `Tensor` holding the current metric value.
    update_op: Op that accumulates statistics from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` has an incompatible shape, or if the collection arguments
      are not lists or tuples.
  """
  return metrics.true_negatives(
      labels=labels,
      predictions=predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.false_positives. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_false_positives(predictions,
                              labels,
                              weights=None,
                              metrics_collections=None,
                              updates_collections=None,
                              name=None):
  """Sum the weights of false positives.

  Deprecated thin wrapper around `tf.metrics.false_positives` (which takes
  `labels` before `predictions`).

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    predictions: Predicted values of arbitrary shape; cast to `bool`.
    labels: Ground-truth values with the same shape as `predictions`;
      cast to `bool`.
    weights: Optional `Tensor` broadcastable to `labels`.
    metrics_collections: Optional list of collections for the metric value.
    updates_collections: Optional list of collections for the update op.
    name: An optional variable_scope name.

  Returns:
    value_tensor: `Tensor` holding the current metric value.
    update_op: Op that accumulates statistics from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` has an incompatible shape, or if the collection arguments
      are not lists or tuples.
  """
  return metrics.false_positives(
      labels=labels,
      predictions=predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.false_negatives. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_false_negatives(predictions,
                              labels,
                              weights=None,
                              metrics_collections=None,
                              updates_collections=None,
                              name=None):
  """Computes the total number of false negatives.

  Deprecated thin wrapper around `tf.metrics.false_negatives` (which takes
  `labels` before `predictions`).

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    predictions: Predicted values of arbitrary shape; cast to `bool`.
    labels: Ground-truth values with the same shape as `predictions`;
      cast to `bool`.
    weights: Optional `Tensor` broadcastable to `labels`.
    metrics_collections: Optional list of collections for the metric value.
    updates_collections: Optional list of collections for the update op.
    name: An optional variable_scope name.

  Returns:
    value_tensor: `Tensor` holding the current metric value.
    update_op: Op that accumulates statistics from a batch of data.

  Raises:
    ValueError: If `weights` has a shape incompatible with `values`, or if
      the collection arguments are not lists or tuples.
  """
  return metrics.false_negatives(
      labels=labels,
      predictions=predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.mean')
def streaming_mean(values,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Computes the (weighted) mean of the given values.

  Deprecated alias for `tf.metrics.mean`.

  Two local variables, `total` and `count`, hold the weighted sum of
  `values` and the sum of `weights`.  The returned `mean` is the
  idempotent ratio `total / count`; `update_op` folds a new batch into
  both accumulators and evaluates to the updated mean.

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: Optional `Tensor` broadcastable to `values`.
    metrics_collections: Optional list of collections for `mean`.
    updates_collections: Optional list of collections for `update_op`.
    name: An optional variable_scope name.

  Returns:
    mean: A `Tensor` holding the current `total / count`.
    update_op: Op that updates the accumulators and yields the new mean.

  Raises:
    ValueError: If `weights` has a shape incompatible with `values`, or if
      the collection arguments are not lists or tuples.
  """
  return metrics.mean(
      values,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.mean_tensor')
def streaming_mean_tensor(values,
                          weights=None,
                          metrics_collections=None,
                          updates_collections=None,
                          name=None):
  """Computes the element-wise (weighted) mean of the given tensors.

  Deprecated alias for `tf.metrics.mean_tensor`.

  Unlike `streaming_mean`, which reduces to a scalar, this returns an
  average tensor with the same shape as the inputs.  Two local variables,
  `total_tensor` and `count_tensor`, accumulate the weighted sum of
  `values` and the sum of `weights`; the returned `mean` is the idempotent
  ratio of the two, and `update_op` folds a new batch into both before
  reporting the updated mean.

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: Optional `Tensor` broadcastable to `values`.
    metrics_collections: Optional list of collections for `mean`.
    updates_collections: Optional list of collections for `update_op`.
    name: An optional variable_scope name.

  Returns:
    mean: A float `Tensor` holding the current element-wise mean.
    update_op: Op that updates the accumulators and yields the new mean.

  Raises:
    ValueError: If `weights` has a shape incompatible with `values`, or if
      the collection arguments are not lists or tuples.
  """
  return metrics.mean_tensor(
      values,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.accuracy. Note that the order '
            'of the labels and predictions arguments has been switched.')
def streaming_accuracy(predictions,
                       labels,
                       weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Calculates how often `predictions` matches `labels`.

  Deprecated thin wrapper around `tf.metrics.accuracy` (which takes
  `labels` before `predictions`).

  Two local variables, `total` and `count`, track the weighted number of
  matches and the total weight seen; `accuracy` is the idempotent ratio
  `total / count`, and `update_op` folds a new batch into both before
  reporting the updated accuracy.

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    predictions: The predicted values, a `Tensor` of any shape.
    labels: Ground-truth values with the same shape as `predictions`.
    weights: Optional `Tensor` broadcastable to `labels`.
    metrics_collections: Optional list of collections for `accuracy`.
    updates_collections: Optional list of collections for `update_op`.
    name: An optional variable_scope name.

  Returns:
    accuracy: A `Tensor` holding the current `total / count`.
    update_op: Op that updates the accumulators and yields the new
      accuracy.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` has an incompatible shape, or if the collection arguments
      are not lists or tuples.
  """
  return metrics.accuracy(
      labels=labels,
      predictions=predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.precision. Note that the order '
            'of the labels and predictions arguments has been switched.')
def streaming_precision(predictions,
                        labels,
                        weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the precision of the predictions with respect to the labels.

  Deprecated thin wrapper around `tf.metrics.precision` (which takes
  `labels` before `predictions`).

  Two local variables, `true_positives` and `false_positives`, are
  maintained; `precision` is the idempotent ratio
  `true_positives / (true_positives + false_positives)`, and `update_op`
  folds a new batch (each prediction weighted by `weights`) into both
  counters before reporting the updated precision.

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    predictions: The predicted values, a `bool` `Tensor` of arbitrary
      shape.
    labels: Ground-truth `bool` values with the same shape as
      `predictions`.
    weights: Optional `Tensor` broadcastable to `labels`.
    metrics_collections: Optional list of collections for `precision`.
    updates_collections: Optional list of collections for `update_op`.
    name: An optional variable_scope name.

  Returns:
    precision: Scalar float `Tensor` of
      `true_positives / (true_positives + false_positives)`.
    update_op: `Operation` that increments both counters and evaluates to
      the updated precision.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` has an incompatible shape, or if the collection arguments
      are not lists or tuples.
  """
  return metrics.precision(
      labels=labels,
      predictions=predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None, 'Please switch to tf.metrics.recall. Note that the order '
            'of the labels and predictions arguments has been switched.')
def streaming_recall(predictions,
                     labels,
                     weights=None,
                     metrics_collections=None,
                     updates_collections=None,
                     name=None):
  """Computes the recall of the predictions with respect to the labels.

  Deprecated thin wrapper around `tf.metrics.recall` (which takes `labels`
  before `predictions`).

  Two local variables, `true_positives` and `false_negatives`, are
  maintained; `recall` is the idempotent ratio
  `true_positives / (true_positives + false_negatives)`, and `update_op`
  folds a new batch (each prediction weighted by `weights`) into both
  counters before reporting the updated recall.

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    predictions: The predicted values, a `bool` `Tensor` of arbitrary
      shape.
    labels: Ground-truth `bool` values with the same shape as
      `predictions`.
    weights: Optional `Tensor` broadcastable to `labels`.
    metrics_collections: Optional list of collections for `recall`.
    updates_collections: Optional list of collections for `update_op`.
    name: An optional variable_scope name.

  Returns:
    recall: Scalar float `Tensor` of
      `true_positives / (true_positives + false_negatives)`.
    update_op: `Operation` that increments both counters and evaluates to
      the updated recall.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` has an incompatible shape, or if the collection arguments
      are not lists or tuples.
  """
  return metrics.recall(
      labels=labels,
      predictions=predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_false_positive_rate(predictions,
                                  labels,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes the false positive rate of predictions with respect to labels.

  Maintains two local variables, `false_positives` and `true_negatives`.
  The returned `false_positive_rate` is the idempotent ratio
  `false_positives / (false_positives + true_negatives)`; `update_op`
  folds a new batch (each prediction weighted by `weights`) into both
  counters and evaluates to the updated rate.

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    predictions: Predicted values of arbitrary shape; cast to `bool`.
    labels: Ground-truth values with the same shape as `predictions`;
      cast to `bool`.
    weights: Optional `Tensor` broadcastable to `labels`.
    metrics_collections: Optional list of collections for
      `false_positive_rate`.
    updates_collections: Optional list of collections for `update_op`.
    name: An optional variable_scope name.

  Returns:
    false_positive_rate: Scalar float `Tensor` of
      `false_positives / (false_positives + true_negatives)`.
    update_op: `Operation` that increments both counters and evaluates to
      the updated rate.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` has an incompatible shape, or if the collection arguments
      are not lists or tuples.
  """
  with variable_scope.variable_scope(name, 'false_positive_rate',
                                     (predictions, labels, weights)):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)

    fp_count, fp_update = metrics.false_positives(
        labels=labels,
        predictions=predictions,
        weights=weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    tn_count, tn_update = metrics.true_negatives(
        labels=labels,
        predictions=predictions,
        weights=weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)

    def compute_fpr(fp, tn, name):
      # Report 0 when the denominator fp + tn is not positive.
      return array_ops.where(
          math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)

    fpr = compute_fpr(fp_count, tn_count, 'value')
    update_op = compute_fpr(fp_update, tn_update, 'update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, fpr)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return fpr, update_op
def streaming_false_negative_rate(predictions,
                                  labels,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes the false negative rate of predictions with respect to labels.

  Maintains two local variables, `false_negatives` and `true_positives`.
  The returned `false_negative_rate` is the idempotent ratio
  `false_negatives / (false_negatives + true_positives)`; `update_op`
  folds a new batch (each prediction weighted by `weights`) into both
  counters and evaluates to the updated rate.

  If `weights` is `None`, every value counts with weight 1; a weight of 0
  masks the corresponding value.

  Args:
    predictions: Predicted values of arbitrary shape; cast to `bool`.
    labels: Ground-truth values with the same shape as `predictions`;
      cast to `bool`.
    weights: Optional `Tensor` broadcastable to `labels`.
    metrics_collections: Optional list of collections for
      `false_negative_rate`.
    updates_collections: Optional list of collections for `update_op`.
    name: An optional variable_scope name.

  Returns:
    false_negative_rate: Scalar float `Tensor` of
      `false_negatives / (false_negatives + true_positives)`.
    update_op: `Operation` that increments both counters and evaluates to
      the updated rate.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` has an incompatible shape, or if the collection arguments
      are not lists or tuples.
  """
  with variable_scope.variable_scope(name, 'false_negative_rate',
                                     (predictions, labels, weights)):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # Keyword arguments for consistency with streaming_false_positive_rate
    # (and to avoid depending on the positional parameter order of the
    # tf.metrics functions).
    false_n, false_negatives_update_op = metrics.false_negatives(
        labels=labels,
        predictions=predictions,
        weights=weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    true_p, true_positives_update_op = metrics.true_positives(
        labels=labels,
        predictions=predictions,
        weights=weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)

    def compute_fnr(fn, tp, name):
      # Report 0 when the denominator fn + tp is not positive.
      return array_ops.where(
          math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)

    fnr = compute_fnr(false_n, true_p, 'value')
    update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,
                            'update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, fnr)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return fnr, update_op
def _streaming_confusion_matrix_at_thresholds(predictions,
                                              labels,
                                              thresholds,
                                              weights=None,
                                              includes=None):
  """Computes true_positives, false_negatives, true_negatives, false_positives.

  This function creates up to four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives`.
  `true_positive[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `false_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `true_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
  `false_positives[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `False`.

  For estimation of these metrics over a stream of data, for each metric the
  function respectively creates an `update_op` operation that updates the
  variable and returns its value.

  Note: this implementation materializes a `[num_thresholds, num_predictions]`
  comparison matrix, i.e. O(T * N) time and space.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
      to `bool`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`,
      default to all four.

  Returns:
    values: Dict of variables of shape `[len(thresholds)]`. Keys are from
      `includes`.
    update_ops: Dict of operations that increments the `values`. Keys are from
      `includes`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `includes` contains invalid keys.
  """
  # Validate `includes` eagerly so a bad key fails before any graph ops are
  # created.
  all_includes = ('tp', 'fn', 'tn', 'fp')
  if includes is None:
    includes = all_includes
  else:
    for include in includes:
      if include not in all_includes:
        raise ValueError('Invalid key: %s.' % include)
  predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
      predictions, labels, weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  num_thresholds = len(thresholds)
  # Reshape predictions and labels so both can be tiled against the threshold
  # axis: predictions as a column, labels as a row.
  predictions_2d = array_ops.reshape(predictions, [-1, 1])
  labels_2d = array_ops.reshape(
      math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
  # Use static shape if known.
  num_predictions = predictions_2d.get_shape().as_list()[0]
  # Otherwise use dynamic shape.
  if num_predictions is None:
    num_predictions = array_ops.shape(predictions_2d)[0]
  # thresh_tiled has shape [num_thresholds, num_predictions]: row i repeats
  # thresholds[i] across all predictions.
  thresh_tiled = array_ops.tile(
      array_ops.expand_dims(array_ops.constant(thresholds), [1]),
      array_ops.stack([1, num_predictions]))
  # Tile the predictions after thresholding them across different thresholds.
  pred_is_pos = math_ops.greater(
      array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
      thresh_tiled)
  # Only build the negation tensors actually needed by the requested keys.
  if ('fn' in includes) or ('tn' in includes):
    pred_is_neg = math_ops.logical_not(pred_is_pos)
  # Tile labels by number of thresholds
  label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
  if ('fp' in includes) or ('tn' in includes):
    label_is_neg = math_ops.logical_not(label_is_pos)
  if weights is not None:
    broadcast_weights = weights_broadcast_ops.broadcast_weights(
        math_ops.to_float(weights), predictions)
    weights_tiled = array_ops.tile(
        array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
    thresh_tiled.get_shape().assert_is_compatible_with(
        weights_tiled.get_shape())
  else:
    weights_tiled = None
  values = {}
  update_ops = {}
  # Each requested key gets a metric variable of shape [num_thresholds] and an
  # assign_add update that accumulates the (optionally weighted) counts.
  if 'tp' in includes:
    true_positives = metrics_impl.metric_variable(
        [num_thresholds], dtypes.float32, name='true_positives')
    is_true_positive = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_pos))
    if weights_tiled is not None:
      is_true_positive *= weights_tiled
    update_ops['tp'] = state_ops.assign_add(true_positives,
                                            math_ops.reduce_sum(
                                                is_true_positive, 1))
    values['tp'] = true_positives
  if 'fn' in includes:
    false_negatives = metrics_impl.metric_variable(
        [num_thresholds], dtypes.float32, name='false_negatives')
    is_false_negative = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_neg))
    if weights_tiled is not None:
      is_false_negative *= weights_tiled
    update_ops['fn'] = state_ops.assign_add(false_negatives,
                                            math_ops.reduce_sum(
                                                is_false_negative, 1))
    values['fn'] = false_negatives
  if 'tn' in includes:
    true_negatives = metrics_impl.metric_variable(
        [num_thresholds], dtypes.float32, name='true_negatives')
    is_true_negative = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_neg))
    if weights_tiled is not None:
      is_true_negative *= weights_tiled
    update_ops['tn'] = state_ops.assign_add(true_negatives,
                                            math_ops.reduce_sum(
                                                is_true_negative, 1))
    values['tn'] = true_negatives
  if 'fp' in includes:
    false_positives = metrics_impl.metric_variable(
        [num_thresholds], dtypes.float32, name='false_positives')
    is_false_positive = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_pos))
    if weights_tiled is not None:
      is_false_positive *= weights_tiled
    update_ops['fp'] = state_ops.assign_add(false_positives,
                                            math_ops.reduce_sum(
                                                is_false_positive, 1))
    values['fp'] = false_positives
  return values, update_ops
def streaming_true_positives_at_thresholds(predictions,
                                           labels,
                                           thresholds,
                                           weights=None):
  """Computes streaming true positives at each of the given thresholds.

  Thin wrapper around `_streaming_confusion_matrix_at_thresholds` that
  requests only the 'tp' entry of the confusion matrix.

  Args:
    predictions: A floating point `Tensor` with values in `[0, 1]`.
    labels: A `Tensor` whose shape matches `predictions`; cast to `bool`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` broadcastable to `labels`.

  Returns:
    true_positives: A variable of shape `[len(thresholds)]`.
    update_op: An operation that accumulates counts into `true_positives`.
  """
  key = 'tp'
  values, update_ops = _streaming_confusion_matrix_at_thresholds(
      predictions, labels, thresholds, weights=weights, includes=(key,))
  return values[key], update_ops[key]
def streaming_false_negatives_at_thresholds(predictions,
                                            labels,
                                            thresholds,
                                            weights=None):
  """Computes streaming false negatives at each of the given thresholds.

  Thin wrapper around `_streaming_confusion_matrix_at_thresholds` that
  requests only the 'fn' entry of the confusion matrix.

  Args:
    predictions: A floating point `Tensor` with values in `[0, 1]`.
    labels: A `Tensor` whose shape matches `predictions`; cast to `bool`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` broadcastable to `labels`.

  Returns:
    false_negatives: A variable of shape `[len(thresholds)]`.
    update_op: An operation that accumulates counts into `false_negatives`.
  """
  key = 'fn'
  values, update_ops = _streaming_confusion_matrix_at_thresholds(
      predictions, labels, thresholds, weights=weights, includes=(key,))
  return values[key], update_ops[key]
def streaming_false_positives_at_thresholds(predictions,
                                            labels,
                                            thresholds,
                                            weights=None):
  """Computes streaming false positives at each of the given thresholds.

  Thin wrapper around `_streaming_confusion_matrix_at_thresholds` that
  requests only the 'fp' entry of the confusion matrix.

  Args:
    predictions: A floating point `Tensor` with values in `[0, 1]`.
    labels: A `Tensor` whose shape matches `predictions`; cast to `bool`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` broadcastable to `labels`.

  Returns:
    false_positives: A variable of shape `[len(thresholds)]`.
    update_op: An operation that accumulates counts into `false_positives`.
  """
  key = 'fp'
  values, update_ops = _streaming_confusion_matrix_at_thresholds(
      predictions, labels, thresholds, weights=weights, includes=(key,))
  return values[key], update_ops[key]
def streaming_true_negatives_at_thresholds(predictions,
                                           labels,
                                           thresholds,
                                           weights=None):
  """Computes streaming true negatives at each of the given thresholds.

  Thin wrapper around `_streaming_confusion_matrix_at_thresholds` that
  requests only the 'tn' entry of the confusion matrix.

  Args:
    predictions: A floating point `Tensor` with values in `[0, 1]`.
    labels: A `Tensor` whose shape matches `predictions`; cast to `bool`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` broadcastable to `labels`.

  Returns:
    true_negatives: A variable of shape `[len(thresholds)]`.
    update_op: An operation that accumulates counts into `true_negatives`.
  """
  key = 'tn'
  values, update_ops = _streaming_confusion_matrix_at_thresholds(
      predictions, labels, thresholds, weights=weights, includes=(key,))
  return values[key], update_ops[key]
def streaming_curve_points(labels=None,
                           predictions=None,
                           weights=None,
                           num_thresholds=200,
                           metrics_collections=None,
                           updates_collections=None,
                           curve='ROC',
                           name=None):
  """Computes curve (ROC or PR) values for a prespecified number of points.

  The `streaming_curve_points` function creates four local variables,
  `true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  that are used to compute the curve values. To discretize the curve, a linearly
  spaced set of thresholds is used to compute pairs of recall and precision
  values.

  For best results, `predictions` should be distributed approximately uniformly
  in the range [0, 1] and not peaked around 0 or 1.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use when discretizing the roc
      curve.
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall-curve.
    name: An optional variable_scope name.

  Returns:
    points: A `Tensor` with shape [num_thresholds, 2] that contains points of
      the curve.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.

  TODO(chizeng): Consider rewriting this method to make use of logic within the
  precision_recall_at_equal_thresholds method (to improve run time).
  """
  with variable_scope.variable_scope(name, 'curve_points',
                                     (labels, predictions, weights)):
    if curve != 'ROC' and curve != 'PR':
      raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
    kepsilon = _EPSILON  # to account for floating point imprecisions
    # Evenly spaced interior thresholds, with sentinels just below 0 and just
    # above 1 so the extreme points of the curve are included.
    thresholds = [
        (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
    ]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        labels=labels,
        predictions=predictions,
        thresholds=thresholds,
        weights=weights)

    # Add epsilons to avoid dividing by 0.
    epsilon = 1.0e-6

    def compute_points(tp, fn, tn, fp):
      """Computes the roc-auc or pr-auc based on confusion counts."""
      # recall = tp / (tp + fn); epsilon keeps the division finite when the
      # denominator is zero.
      rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
      if curve == 'ROC':
        fp_rate = math_ops.div(fp, fp + tn + epsilon)
        return fp_rate, rec
      else:  # curve == 'PR'.
        prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
        return rec, prec

    xs, ys = compute_points(values['tp'], values['fn'], values['tn'],
                            values['fp'])
    points = array_ops.stack([xs, ys], axis=1)
    # Single grouped update op covering all four confusion-matrix variables.
    update_op = control_flow_ops.group(*update_ops.values())

    if metrics_collections:
      ops.add_to_collections(metrics_collections, points)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return points, update_op
@deprecated(None, 'Please switch to tf.metrics.auc. Note that the order of '
            'the labels and predictions arguments has been switched.')
def streaming_auc(predictions,
                  labels,
                  weights=None,
                  num_thresholds=200,
                  metrics_collections=None,
                  updates_collections=None,
                  curve='ROC',
                  name=None):
  """Computes the approximate AUC via a Riemann sum.

  Deprecated thin wrapper that forwards to `tf.metrics.auc`; note that
  `tf.metrics.auc` takes `labels` before `predictions`. Four local variables
  (`true_positives`, `true_negatives`, `false_positives`, `false_negatives`)
  are created by the underlying metric, and the curve is discretized over a
  linearly spaced set of `num_thresholds` thresholds, so approximation quality
  depends on `num_thresholds` and works best when `predictions` is roughly
  uniform in [0, 1].

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`,
      and must be broadcastable to `labels`.
    num_thresholds: The number of thresholds to use when discretizing the
      curve.
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall-curve.
    name: An optional variable_scope name.

  Returns:
    auc: A scalar `Tensor` representing the current area-under-curve.
    update_op: An operation that increments the confusion-matrix variables
      appropriately and whose value matches `auc`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.auc(
      labels=labels,
      predictions=predictions,
      weights=weights,
      num_thresholds=num_thresholds,
      curve=curve,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def _compute_dynamic_auc(labels, predictions, curve='ROC', weights=None):
  """Computes the approximate AUC by a Riemann sum with data-derived thresholds.

  Computes the area under the ROC or PR curve using each prediction as a
  threshold. This could be slow for large batches, but has the advantage of not
  having its results degrade depending on the distribution of predictions.

  Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` with values of 0 or 1 and type `int64`.
    predictions: A 1-D `Tensor` of predictions whose values are `float64`.
    curve: The name of the curve to be computed, 'ROC' for the Receiving
      Operating Characteristic or 'PR' for the Precision-Recall curve.
    weights: A 1-D `Tensor` of weights whose values are `float64`.

  Returns:
    A scalar `Tensor` containing the area-under-curve value for the input.
  """
  # Compute the total weight and the total positive weight.
  size = array_ops.size(predictions)
  if weights is None:
    weights = array_ops.ones_like(labels, dtype=dtypes.float64)
  labels, predictions, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
      labels, predictions, weights)
  total_weight = math_ops.reduce_sum(weights)
  total_positive = math_ops.reduce_sum(
      array_ops.where(
          math_ops.greater(labels, 0), weights,
          array_ops.zeros_like(labels, dtype=dtypes.float64)))

  def continue_computing_dynamic_auc():
    """Continues dynamic auc computation, entered if labels are not all equal.

    Returns:
      A scalar `Tensor` containing the area-under-curve value.
    """
    # Sort the predictions descending, keeping the same order for the
    # corresponding labels and weights.
    ordered_predictions, indices = nn.top_k(predictions, k=size)
    ordered_labels = array_ops.gather(labels, indices)
    ordered_weights = array_ops.gather(weights, indices)

    # Get the counts of the unique ordered predictions.
    _, _, counts = array_ops.unique_with_counts(ordered_predictions)

    # Compute the indices of the split points between different predictions.
    # Padding a leading 0 makes `splits` start at index 0 so the cumulative
    # sums below correctly count "everything to the left" of each split.
    splits = math_ops.cast(
        array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)

    # Count the positives to the left of the split indices.
    true_positives = array_ops.gather(
        array_ops.pad(
            math_ops.cumsum(
                array_ops.where(
                    math_ops.greater(ordered_labels, 0), ordered_weights,
                    array_ops.zeros_like(ordered_labels,
                                         dtype=dtypes.float64))),
            paddings=[[1, 0]]), splits)
    if curve == 'ROC':
      # Compute the weight of the negatives to the left of every split point,
      # and the total negative weight, for computing the FPR.
      false_positives = array_ops.gather(
          array_ops.pad(
              math_ops.cumsum(
                  array_ops.where(
                      math_ops.less(ordered_labels, 1), ordered_weights,
                      array_ops.zeros_like(
                          ordered_labels, dtype=dtypes.float64))),
              paddings=[[1, 0]]), splits)
      total_negative = total_weight - total_positive
      x_axis_values = math_ops.truediv(false_positives, total_negative)
      y_axis_values = math_ops.truediv(true_positives, total_positive)
    elif curve == 'PR':
      x_axis_values = math_ops.truediv(true_positives, total_positive)
      # For conformance, set precision to 1 when the number of positive
      # classifications is 0.
      positives = array_ops.gather(
          array_ops.pad(math_ops.cumsum(ordered_weights), paddings=[[1, 0]]),
          splits)
      y_axis_values = array_ops.where(
          math_ops.greater(splits, 0),
          math_ops.truediv(true_positives, positives),
          array_ops.ones_like(true_positives, dtype=dtypes.float64))

    # Calculate trapezoid areas.
    heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) / 2.0
    widths = math_ops.abs(
        math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))
    return math_ops.reduce_sum(math_ops.multiply(heights, widths))

  # If all the labels are the same, AUC isn't well-defined (but raising an
  # exception seems excessive) so we return 0, otherwise we finish computing.
  return control_flow_ops.cond(
      math_ops.logical_or(
          math_ops.equal(total_positive, 0), math_ops.equal(
              total_positive, total_weight)),
      true_fn=lambda: array_ops.constant(0, dtypes.float64),
      false_fn=continue_computing_dynamic_auc)
def streaming_dynamic_auc(labels,
                          predictions,
                          curve='ROC',
                          metrics_collections=(),
                          updates_collections=(),
                          name=None,
                          weights=None):
  """Computes the approximate AUC by a Riemann sum with data-derived thresholds.

  USAGE NOTE: this approach requires storing all of the predictions and labels
  for a single evaluation in memory, so it may not be usable when the evaluation
  batch size and/or the number of evaluation steps is very large.

  Computes the area under the ROC or PR curve using each prediction as a
  threshold. This has the advantage of being resilient to the distribution of
  predictions by aggregating across batches, accumulating labels and predictions
  and performing the final calculation using all of the concatenated values.

  Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 whose values are castable to
      `int64`.
    predictions: A `Tensor` of predictions whose values are castable to
      `float64`. Will be flattened into a 1-D `Tensor`.
    curve: The name of the curve for which to compute AUC, 'ROC' for the
      Receiving Operating Characteristic or 'PR' for the Precision-Recall curve.
    metrics_collections: An optional iterable of collections that `auc` should
      be added to.
    updates_collections: An optional iterable of collections that `update_op`
      should be added to.
    name: An optional name for the variable_scope that contains the metric
      variables.
    weights: A 'Tensor' of non-negative weights whose values are castable to
      `float64`. Will be flattened into a 1-D `Tensor`.

  Returns:
    auc: A scalar `Tensor` containing the current area-under-curve value.
    update_op: An operation that concatenates the input labels and predictions
      to the accumulated values.

  Raises:
    ValueError: If `labels` and `predictions` have mismatched shapes or if
      `curve` isn't a recognized curve type.
  """
  # Validate the curve name eagerly, before building any graph state.
  if curve not in ['PR', 'ROC']:
    raise ValueError('curve must be either ROC or PR, %s unknown' % curve)

  with variable_scope.variable_scope(name, default_name='dynamic_auc'):
    labels.get_shape().assert_is_compatible_with(predictions.get_shape())
    predictions = array_ops.reshape(
        math_ops.cast(predictions, dtypes.float64), [-1])
    labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
    # Runtime assertions guarantee labels are in {0, 1} before accumulating.
    with ops.control_dependencies([
        check_ops.assert_greater_equal(
            labels,
            array_ops.zeros_like(labels, dtypes.int64),
            message='labels must be 0 or 1, at least one is <0'),
        check_ops.assert_less_equal(
            labels,
            array_ops.ones_like(labels, dtypes.int64),
            message='labels must be 0 or 1, at least one is >1'),
    ]):
      # Accumulate every batch's labels/predictions (and weights, if given);
      # the final AUC is computed over the full concatenated history.
      preds_accum, update_preds = streaming_concat(
          predictions, name='concat_preds')
      labels_accum, update_labels = streaming_concat(
          labels, name='concat_labels')
      if weights is not None:
        weights = array_ops.reshape(
            math_ops.cast(weights, dtypes.float64), [-1])
        weights_accum, update_weights = streaming_concat(
            weights, name='concat_weights')
        update_op = control_flow_ops.group(update_labels, update_preds,
                                           update_weights)
      else:
        weights_accum = None
        update_op = control_flow_ops.group(update_labels, update_preds)
      auc = _compute_dynamic_auc(
          labels_accum, preds_accum, curve=curve, weights=weights_accum)
      if updates_collections:
        ops.add_to_collections(updates_collections, update_op)
      if metrics_collections:
        ops.add_to_collections(metrics_collections, auc)
      return auc, update_op
def _compute_placement_auc(labels, predictions, weights, alpha,
                           logit_transformation, is_valid):
  """Computes the AUC and asymptotic normally distributed confidence interval.

  The calculations are achieved using the fact that AUC = P(Y_1>Y_0) and the
  concept of placement values for each labeled group, as presented by Delong and
  Delong (1988). The actual algorithm used is a more computationally efficient
  approach presented by Sun and Xu (2014). This could be slow for large batches,
  but has the advantage of not having its results degrade depending on the
  distribution of predictions.

  Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` with values of 0 or 1 and type `int64`.
    predictions: A 1-D `Tensor` of predictions whose values are `float64`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`.
    alpha: Confidence interval level desired.
    logit_transformation: A boolean value indicating whether the estimate should
      be logit transformed prior to calculating the confidence interval. Doing
      so enforces the restriction that the AUC should never be outside the
      interval [0,1].
    is_valid: A bool tensor describing whether the input is valid.

  Returns:
    A 1-D `Tensor` containing the area-under-curve, lower, and upper confidence
    interval values.
  """
  # Disable the invalid-name checker so that we can capitalize the name.
  # pylint: disable=invalid-name
  AucData = collections_lib.namedtuple('AucData', ['auc', 'lower', 'upper'])
  # pylint: enable=invalid-name

  # If all the labels are the same or if number of observations are too few,
  # AUC isn't well-defined
  size = array_ops.size(predictions, out_type=dtypes.int32)

  # Count the total number of positive and negative labels in the input.
  total_0 = math_ops.reduce_sum(
      math_ops.cast(1 - labels, weights.dtype) * weights)
  total_1 = math_ops.reduce_sum(
      math_ops.cast(labels, weights.dtype) * weights)

  # Sort the predictions ascending, as well as
  # (i) the corresponding labels and
  # (ii) the corresponding weights.
  # top_k sorts descending, so the results are reversed to get ascending order.
  ordered_predictions, indices = nn.top_k(predictions, k=size, sorted=True)
  ordered_predictions = array_ops.reverse(
      ordered_predictions, axis=array_ops.zeros(1, dtypes.int32))
  indices = array_ops.reverse(indices, axis=array_ops.zeros(1, dtypes.int32))
  ordered_labels = array_ops.gather(labels, indices)
  ordered_weights = array_ops.gather(weights, indices)

  # We now compute values required for computing placement values.

  # We generate a list of indices (segmented_indices) of increasing order. An
  # index is assigned for each unique prediction float value. Prediction
  # values that are the same share the same index.
  _, segmented_indices = array_ops.unique(ordered_predictions)

  # We create 2 tensors of weights. weights_for_true is non-zero for true
  # labels. weights_for_false is non-zero for false labels.
  float_labels_for_true = math_ops.cast(ordered_labels, dtypes.float32)
  float_labels_for_false = 1.0 - float_labels_for_true
  weights_for_true = ordered_weights * float_labels_for_true
  weights_for_false = ordered_weights * float_labels_for_false

  # For each set of weights with the same segmented indices, we add up the
  # weight values. Note that for each label, we deliberately rely on weights
  # for the opposite label.
  weight_totals_for_true = math_ops.segment_sum(weights_for_false,
                                                segmented_indices)
  weight_totals_for_false = math_ops.segment_sum(weights_for_true,
                                                 segmented_indices)

  # These cumulative sums of weights importantly exclude the current weight
  # sums.
  cum_weight_totals_for_true = math_ops.cumsum(weight_totals_for_true,
                                               exclusive=True)
  cum_weight_totals_for_false = math_ops.cumsum(weight_totals_for_false,
                                                exclusive=True)

  # Compute placement values using the formula. Values with the same segmented
  # indices and labels share the same placement values.
  # Ties contribute half their weight (the `/ 2.0` term); _EPSILON avoids a
  # zero denominator.
  placements_for_true = (
      (cum_weight_totals_for_true + weight_totals_for_true / 2.0) /
      (math_ops.reduce_sum(weight_totals_for_true) + _EPSILON))
  placements_for_false = (
      (cum_weight_totals_for_false + weight_totals_for_false / 2.0) /
      (math_ops.reduce_sum(weight_totals_for_false) + _EPSILON))

  # We expand the tensors of placement values (for each label) so that their
  # shapes match that of predictions.
  placements_for_true = array_ops.gather(placements_for_true, segmented_indices)
  placements_for_false = array_ops.gather(placements_for_false,
                                          segmented_indices)

  # Select placement values based on the label for each index.
  placement_values = (
      placements_for_true * float_labels_for_true +
      placements_for_false * float_labels_for_false)

  # Split placement values by labeled groups.
  placement_values_0 = placement_values * math_ops.cast(
      1 - ordered_labels, weights.dtype)
  weights_0 = ordered_weights * math_ops.cast(
      1 - ordered_labels, weights.dtype)
  placement_values_1 = placement_values * math_ops.cast(
      ordered_labels, weights.dtype)
  weights_1 = ordered_weights * math_ops.cast(
      ordered_labels, weights.dtype)

  # Calculate AUC using placement values; the estimate from the larger labeled
  # group is selected below.
  auc_0 = (math_ops.reduce_sum(weights_0 * (1. - placement_values_0)) /
           (total_0 + _EPSILON))
  auc_1 = (math_ops.reduce_sum(weights_1 * (placement_values_1)) /
           (total_1 + _EPSILON))
  auc = array_ops.where(math_ops.less(total_0, total_1), auc_1, auc_0)

  # Calculate variance and standard error using the placement values.
  var_0 = (
      math_ops.reduce_sum(
          weights_0 * math_ops.square(1. - placement_values_0 - auc_0)) /
      (total_0 - 1. + _EPSILON))
  var_1 = (
      math_ops.reduce_sum(
          weights_1 * math_ops.square(placement_values_1 - auc_1)) /
      (total_1 - 1. + _EPSILON))
  auc_std_err = math_ops.sqrt(
      (var_0 / (total_0 + _EPSILON)) + (var_1 / (total_1 + _EPSILON)))

  # Calculate asymptotic normal confidence intervals
  std_norm_dist = Normal(loc=0., scale=1.)
  z_value = std_norm_dist.quantile((1.0 - alpha) / 2.0)
  if logit_transformation:
    # Work in logit space so the back-transformed interval stays in [0, 1];
    # std_err uses the delta method.
    estimate = math_ops.log(auc / (1. - auc + _EPSILON))
    std_err = auc_std_err / (auc * (1. - auc + _EPSILON))
    transformed_auc_lower = estimate + (z_value * std_err)
    transformed_auc_upper = estimate - (z_value * std_err)
    def inverse_logit_transformation(x):
      # Sigmoid with an _EPSILON guard in the denominator.
      exp_negative = math_ops.exp(math_ops.negative(x))
      return 1. / (1. + exp_negative + _EPSILON)

    auc_lower = inverse_logit_transformation(transformed_auc_lower)
    auc_upper = inverse_logit_transformation(transformed_auc_upper)
  else:
    estimate = auc
    std_err = auc_std_err
    auc_lower = estimate + (z_value * std_err)
    auc_upper = estimate - (z_value * std_err)

  ## If estimate is 1 or 0, no variance is present so CI = 1
  ## n.b. This can be misleading, since number obs can just be too low.
  lower = array_ops.where(
      math_ops.logical_or(
          math_ops.equal(auc, array_ops.ones_like(auc)),
          math_ops.equal(auc, array_ops.zeros_like(auc))),
      auc, auc_lower)
  upper = array_ops.where(
      math_ops.logical_or(
          math_ops.equal(auc, array_ops.ones_like(auc)),
          math_ops.equal(auc, array_ops.zeros_like(auc))),
      auc, auc_upper)

  # If all the labels are the same, AUC isn't well-defined (but raising an
  # exception seems excessive) so we return 0, otherwise we finish computing.
  trivial_value = array_ops.constant(0.0)

  return AucData(*control_flow_ops.cond(
      is_valid, lambda: [auc, lower, upper], lambda: [trivial_value]*3))
def auc_with_confidence_intervals(labels,
                                  predictions,
                                  weights=None,
                                  alpha=0.95,
                                  logit_transformation=True,
                                  metrics_collections=(),
                                  updates_collections=(),
                                  name=None):
  """Computes the AUC and asymptotic normally distributed confidence interval.

  USAGE NOTE: this approach requires storing all of the predictions and labels
  for a single evaluation in memory, so it may not be usable when the evaluation
  batch size and/or the number of evaluation steps is very large.

  Computes the area under the ROC curve and its confidence interval using
  placement values. This has the advantage of being resilient to the
  distribution of predictions by aggregating across batches, accumulating labels
  and predictions and performing the final calculation using all of the
  concatenated values.

  Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 whose values are castable to
      `int64`.
    predictions: A `Tensor` of predictions whose values are castable to
      `float64`. Will be flattened into a 1-D `Tensor`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`.
    alpha: Confidence interval level desired.
    logit_transformation: A boolean value indicating whether the estimate should
      be logit transformed prior to calculating the confidence interval. Doing
      so enforces the restriction that the AUC should never be outside the
      interval [0,1].
    metrics_collections: An optional iterable of collections that `auc` should
      be added to.
    updates_collections: An optional iterable of collections that `update_op`
      should be added to.
    name: An optional name for the variable_scope that contains the metric
      variables.

  Returns:
    auc: A 1-D `Tensor` containing the current area-under-curve, lower, and
      upper confidence interval values.
    update_op: An operation that concatenates the input labels and predictions
      to the accumulated values.

  Raises:
    ValueError: If `labels`, `predictions`, and `weights` have mismatched shapes
    or if `alpha` isn't in the range (0,1).
  """
  # Validate `alpha` eagerly, before any graph state is created.
  if not (alpha > 0 and alpha < 1):
    raise ValueError('alpha must be between 0 and 1; currently %.02f' % alpha)

  if weights is None:
    weights = array_ops.ones_like(predictions)

  with variable_scope.variable_scope(
      name,
      default_name='auc_with_confidence_intervals',
      values=[labels, predictions, weights]):

    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions=predictions,
        labels=labels,
        weights=weights)

    # Per-batch total weight; used below to gate whether this batch's values
    # are accumulated.
    total_weight = math_ops.reduce_sum(weights)

    weights = array_ops.reshape(weights, [-1])
    predictions = array_ops.reshape(
        math_ops.cast(predictions, dtypes.float64), [-1])
    labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])

    # Runtime assertions guarantee labels are in {0, 1} before accumulating.
    with ops.control_dependencies([
        check_ops.assert_greater_equal(
            labels,
            array_ops.zeros_like(labels, dtypes.int64),
            message='labels must be 0 or 1, at least one is <0'),
        check_ops.assert_less_equal(
            labels,
            array_ops.ones_like(labels, dtypes.int64),
            message='labels must be 0 or 1, at least one is >1'),
    ]):
      preds_accum, update_preds = streaming_concat(
          predictions, name='concat_preds')
      labels_accum, update_labels = streaming_concat(labels,
                                                     name='concat_labels')
      weights_accum, update_weights = streaming_concat(
          weights, name='concat_weights')
      update_op_for_valid_case = control_flow_ops.group(
          update_labels, update_preds, update_weights)

      # Only perform updates if this case is valid.
      all_labels_positive_or_0 = math_ops.logical_and(
          math_ops.equal(math_ops.reduce_min(labels), 0),
          math_ops.equal(math_ops.reduce_max(labels), 1))
      sums_of_weights_at_least_1 = math_ops.greater_equal(total_weight, 1.0)
      is_valid = math_ops.logical_and(all_labels_positive_or_0,
                                      sums_of_weights_at_least_1)

      # NOTE(review): the update is gated only on the weight-sum condition;
      # the label condition is applied via `is_valid` inside
      # `_compute_placement_auc` instead — confirm this asymmetry is intended.
      update_op = control_flow_ops.cond(
          sums_of_weights_at_least_1,
          lambda: update_op_for_valid_case, control_flow_ops.no_op)

      auc = _compute_placement_auc(
          labels_accum,
          preds_accum,
          weights_accum,
          alpha=alpha,
          logit_transformation=logit_transformation,
          is_valid=is_valid)

      if updates_collections:
        ops.add_to_collections(updates_collections, update_op)
      if metrics_collections:
        ops.add_to_collections(metrics_collections, auc)
      return auc, update_op
def precision_recall_at_equal_thresholds(labels,
                                         predictions,
                                         weights=None,
                                         num_thresholds=None,
                                         use_locking=None,
                                         name=None):
  """A helper method for creating metrics related to precision-recall curves.

  These values are true positives, false negatives, true negatives, false
  positives, precision, and recall. This function returns a data structure that
  contains ops within it.

  Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)
  space and run time), this op exhibits O(T + N) space and run time, where T is
  the number of thresholds and N is the size of the predictions tensor. Hence,
  it may be advantageous to use this function when `predictions` is big.

  For instance, prefer this method for per-pixel classification tasks, for
  which the predictions tensor may be very large.

  Each number in `predictions`, a float in `[0, 1]`, is compared with its
  corresponding label in `labels`, and counts as a single tp/fp/tn/fn value at
  each threshold. This is then multiplied with `weights` which can be used to
  reweight certain values, or more commonly used for masking values.

  Args:
    labels: A bool `Tensor` whose shape matches `predictions`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    weights: Optional; If provided, a `Tensor` that has the same dtype as,
      and broadcastable to, `predictions`. This tensor is multiplied by counts.
    num_thresholds: Optional; Number of thresholds, evenly distributed in
      `[0, 1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins
      is 1 less than `num_thresholds`. Using an even `num_thresholds` value
      instead of an odd one may yield unfriendly edges for bins.
    use_locking: Optional; If True, the op will be protected by a lock.
      Otherwise, the behavior is undefined, but may exhibit less contention.
      Defaults to True.
    name: Optional; variable_scope name. If not provided, the string
      'precision_recall_at_equal_threshold' is used.

  Returns:
    result: A named tuple (See PrecisionRecallData within the implementation of
      this function) with properties that are variables of shape
      `[num_thresholds]`. The names of the properties are tp, fp, tn, fn,
      precision, recall, thresholds. Types are same as that of predictions.
    update_op: An op that accumulates values.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `includes` contains invalid keys.
  """
  # Disable the invalid-name checker so that we can capitalize the name.
  # pylint: disable=invalid-name
  PrecisionRecallData = collections_lib.namedtuple(
      'PrecisionRecallData',
      ['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])
  # pylint: enable=invalid-name

  if num_thresholds is None:
    num_thresholds = 201

  if weights is None:
    weights = 1.0

  if use_locking is None:
    use_locking = True

  # Labels must already be boolean; this asserts at graph-construction time.
  check_ops.assert_type(labels, dtypes.bool)

  with variable_scope.variable_scope(name,
                                     'precision_recall_at_equal_thresholds',
                                     (labels, predictions, weights)):
    # Make sure that predictions are within [0.0, 1.0].
    with ops.control_dependencies([
        check_ops.assert_greater_equal(
            predictions,
            math_ops.cast(0.0, dtype=predictions.dtype),
            message='predictions must be in [0, 1]'),
        check_ops.assert_less_equal(
            predictions,
            math_ops.cast(1.0, dtype=predictions.dtype),
            message='predictions must be in [0, 1]')
    ]):
      predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
          predictions=predictions,
          labels=labels,
          weights=weights)

    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    # It's important we aggregate using float64 since we're accumulating a lot
    # of 1.0's for the true/false labels, and accumulating to float32 will
    # be quite inaccurate even with just a modest amount of values (~20M).
    # We use float64 instead of integer primarily since GPU scatter kernel
    # only support floats.
    agg_dtype = dtypes.float64

    # Per-example positive/negative masses: true_labels carries the weight of
    # positive examples, false_labels the weight of negative examples.
    f_labels = math_ops.cast(labels, agg_dtype)
    weights = math_ops.cast(weights, agg_dtype)
    true_labels = f_labels * weights
    false_labels = (1.0 - f_labels) * weights

    # Flatten predictions and labels.
    predictions = array_ops.reshape(predictions, [-1])
    true_labels = array_ops.reshape(true_labels, [-1])
    false_labels = array_ops.reshape(false_labels, [-1])

    # To compute TP/FP/TN/FN, we are measuring a binary classifier
    #   C(t) = (predictions >= t)
    # at each threshold 't'. So we have
    #   TP(t) = sum( C(t) * true_labels )
    #   FP(t) = sum( C(t) * false_labels )
    #
    # But, computing C(t) requires computation for each t. To make it fast,
    # observe that C(t) is a cumulative integral, and so if we have
    #   thresholds = [t_0, ..., t_{n-1}];  t_0 < ... < t_{n-1}
    # where n = num_thresholds, and if we can compute the bucket function
    #   B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
    # then we get
    #   C(t_i) = sum( B(j), j >= i )
    # which is the reversed cumulative sum in tf.cumsum().
    #
    # We can compute B(i) efficiently by taking advantage of the fact that
    # our thresholds are evenly distributed, in that
    #   width = 1.0 / (num_thresholds - 1)
    #   thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
    # Given a prediction value p, we can map it to its bucket by
    #   bucket_index(p) = floor( p * (num_thresholds - 1) )
    # so we can use tf.scatter_add() to update the buckets in one pass.
    #
    # This implementation exhibits a run time and space complexity of O(T + N),
    # where T is the number of thresholds and N is the size of predictions.
    # Metrics that rely on _streaming_confusion_matrix_at_thresholds instead
    # exhibit a complexity of O(T * N).

    # Compute the bucket indices for each prediction value.
    # Note: a prediction of exactly 1.0 lands in bucket num_thresholds - 1,
    # which is still a valid index into the [num_thresholds]-sized buckets.
    bucket_indices = math_ops.cast(
        math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)

    with ops.name_scope('variables'):
      tp_buckets_v = metrics_impl.metric_variable(
          [num_thresholds], agg_dtype, name='tp_buckets')
      fp_buckets_v = metrics_impl.metric_variable(
          [num_thresholds], agg_dtype, name='fp_buckets')

    with ops.name_scope('update_op'):
      # scatter_add accumulates the weighted positive/negative mass of every
      # prediction into its bucket; duplicate indices are summed.
      update_tp = state_ops.scatter_add(
          tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)
      update_fp = state_ops.scatter_add(
          fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)

    # Set up the cumulative sums to compute the actual metrics.
    tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')
    fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')
    # fn = sum(true_labels) - tp
    #    = sum(tp_buckets) - tp
    #    = tp[0] - tp
    # Similarly,
    # tn = fp[0] - fp
    tn = fp[0] - fp
    fn = tp[0] - tp

    # We use a minimum to prevent division by 0.
    epsilon = ops.convert_to_tensor(1e-7, dtype=agg_dtype)
    precision = tp / math_ops.maximum(epsilon, tp + fp)
    recall = tp / math_ops.maximum(epsilon, tp + fn)

    # Convert all tensors back to predictions' dtype (as per function contract).
    out_dtype = predictions.dtype
    _convert = lambda tensor: math_ops.cast(tensor, out_dtype)
    result = PrecisionRecallData(
        tp=_convert(tp),
        fp=_convert(fp),
        tn=_convert(tn),
        fn=_convert(fn),
        precision=_convert(precision),
        recall=_convert(recall),
        thresholds=_convert(math_ops.lin_space(0.0, 1.0, num_thresholds)))
    update_op = control_flow_ops.group(update_tp, update_fp)
    return result, update_op
def streaming_specificity_at_sensitivity(predictions,
                                         labels,
                                         sensitivity,
                                         weights=None,
                                         num_thresholds=200,
                                         metrics_collections=None,
                                         updates_collections=None,
                                         name=None):
  """Computes the specificity at a given sensitivity.

  Thin wrapper around `tf.metrics.specificity_at_sensitivity` (only the
  argument order differs). Four local variables — `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` — accumulate the
  confusion-matrix counts over `num_thresholds` candidate thresholds; the
  threshold that achieves the requested `sensitivity` is selected and the
  specificity at that threshold is reported.

  For estimation over a stream of data, the returned `update_op` increments
  the four count variables with the weight of each case found in
  `predictions` and `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  For additional information about specificity and sensitivity, see:
  https://en.wikipedia.org/wiki/Sensitivity_and_specificity

  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    sensitivity: A scalar value in range `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use for matching the given
      sensitivity.
    metrics_collections: An optional list of collections that `specificity`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `specificity`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `sensitivity` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
  """
  # Pure delegation; all keyword arguments are forwarded unchanged.
  return metrics.specificity_at_sensitivity(
      predictions=predictions,
      labels=labels,
      weights=weights,
      sensitivity=sensitivity,
      num_thresholds=num_thresholds,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_sensitivity_at_specificity(predictions,
                                         labels,
                                         specificity,
                                         weights=None,
                                         num_thresholds=200,
                                         metrics_collections=None,
                                         updates_collections=None,
                                         name=None):
  """Computes the sensitivity at a given specificity.

  Thin wrapper around `tf.metrics.sensitivity_at_specificity` (only the
  argument order differs). Four local variables — `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` — accumulate the
  confusion-matrix counts over `num_thresholds` candidate thresholds; the
  threshold that achieves the requested `specificity` is selected and the
  sensitivity at that threshold is reported.

  For estimation over a stream of data, the returned `update_op` increments
  the four count variables with the weight of each case found in
  `predictions` and `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  For additional information about specificity and sensitivity, see:
  https://en.wikipedia.org/wiki/Sensitivity_and_specificity

  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    specificity: A scalar value in range `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use for matching the given
      specificity.
    metrics_collections: An optional list of collections that `sensitivity`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    sensitivity: A scalar `Tensor` representing the sensitivity at the given
      `specificity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `sensitivity`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `specificity` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
  """
  # Pure delegation; all keyword arguments are forwarded unchanged.
  return metrics.sensitivity_at_specificity(
      predictions=predictions,
      labels=labels,
      weights=weights,
      specificity=specificity,
      num_thresholds=num_thresholds,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None,
            'Please switch to tf.metrics.precision_at_thresholds. Note that '
            'the order of the labels and predictions arguments are switched.')
def streaming_precision_at_thresholds(predictions,
                                      labels,
                                      thresholds,
                                      weights=None,
                                      metrics_collections=None,
                                      updates_collections=None,
                                      name=None):
  """Computes precision values for different `thresholds` on `predictions`.

  Deprecated wrapper around `tf.metrics.precision_at_thresholds` (only the
  argument order differs). Four local variables — `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` — are maintained
  per threshold. `precision[i]` is the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `True`,
  divided by the total weight of values in `predictions` above
  `thresholds[i]` (`true_positives[i] / (true_positives[i] +
  false_positives[i])`).

  For estimation over a stream of data, the returned `update_op` updates
  these variables and returns the `precision`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `precision`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    precision: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables that
      are used in the computation of `precision`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  # Pure delegation; all keyword arguments are forwarded unchanged.
  return metrics.precision_at_thresholds(
      predictions=predictions,
      labels=labels,
      weights=weights,
      thresholds=thresholds,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None,
            'Please switch to tf.metrics.recall_at_thresholds. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_recall_at_thresholds(predictions,
                                   labels,
                                   thresholds,
                                   weights=None,
                                   metrics_collections=None,
                                   updates_collections=None,
                                   name=None):
  """Computes various recall values for different `thresholds` on `predictions`.

  Deprecated wrapper around `tf.metrics.recall_at_thresholds` (only the
  argument order differs). Four local variables — `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` — are maintained
  per threshold. `recall[i]` is the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `True`,
  divided by the total weight of `True` values in `labels`
  (`true_positives[i] / (true_positives[i] + false_negatives[i])`).

  For estimation over a stream of data, the returned `update_op` updates
  these variables and returns the `recall`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `recall` should
      be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    recall: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables that
      are used in the computation of `recall`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  # Pure delegation; all keyword arguments are forwarded unchanged.
  return metrics.recall_at_thresholds(
      predictions=predictions,
      labels=labels,
      weights=weights,
      thresholds=thresholds,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_false_positive_rate_at_thresholds(predictions,
                                                labels,
                                                thresholds,
                                                weights=None,
                                                metrics_collections=None,
                                                updates_collections=None,
                                                name=None):
  """Computes various fpr values for different `thresholds` on `predictions`.

  Two local variables, `false_positives` and `true_negatives`, are maintained
  per threshold. `false_positive_rate[i]` is the total weight of values in
  `predictions` above `thresholds[i]` whose corresponding entry in `labels` is
  `False`, divided by the total weight of `False` values in `labels`
  (`false_positives[i] / (false_positives[i] + true_negatives[i])`).

  For estimation over a stream of data, the returned `update_op` updates
  these variables and returns the `false_positive_rate`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `false_positive_rate` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `false_positives` and
      `true_negatives` variables that are used in the computation of
      `false_positive_rate`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',
                                     (predictions, labels, weights)):
    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        predictions, labels, thresholds, weights, includes=('fp', 'tn'))

    def _fpr(fp_count, tn_count, suffix):
      # The _EPSILON in the denominator guards against division by zero.
      return math_ops.div(fp_count, _EPSILON + fp_count + tn_count,
                          name='fpr_' + suffix)

    fpr = _fpr(values['fp'], values['tn'], 'value')
    update_op = _fpr(update_ops['fp'], update_ops['tn'], 'update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, fpr)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return fpr, update_op
def streaming_false_negative_rate_at_thresholds(predictions,
                                                labels,
                                                thresholds,
                                                weights=None,
                                                metrics_collections=None,
                                                updates_collections=None,
                                                name=None):
  """Computes various fnr values for different `thresholds` on `predictions`.

  The `streaming_false_negative_rate_at_thresholds` function creates two
  local variables, `false_negatives`, `true_positives`, for various values of
  thresholds. `false_negative_rate[i]` is defined as the total weight
  of values in `predictions` at or below `thresholds[i]` whose corresponding
  entry in `labels` is `True`, divided by the total weight of `True` values in
  `labels` (`false_negatives[i] / (false_negatives[i] + true_positives[i])`).

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `false_negative_rate`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    labels: A `bool` `Tensor` whose shape matches `predictions`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `false_negative_rate` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `false_negatives` and
      `true_positives` variables that are used in the computation of
      `false_negative_rate`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',
                                     (predictions, labels, weights)):
    # Only the 'fn' and 'tp' entries of the confusion matrix are needed.
    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        predictions, labels, thresholds, weights, includes=('fn', 'tp'))

    # Avoid division by zero.
    epsilon = _EPSILON

    def compute_fnr(fn, tp, name):
      return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)

    fnr = compute_fnr(values['fn'], values['tp'], 'value')
    update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, fnr)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return fnr, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
            'and reshape labels from [batch_size] to [batch_size, 1].')
def streaming_recall_at_k(predictions,
                          labels,
                          k,
                          weights=None,
                          metrics_collections=None,
                          updates_collections=None,
                          name=None):
  """Computes the recall@k of the predictions with respect to dense labels.

  Two local variables, `total` and `count`, accumulate the recall@k
  frequency, which is ultimately returned as `recall_at_<k>`: an idempotent
  operation that simply divides `total` by `count`.

  For estimation over a stream of data, the returned `update_op` works as
  follows: an `in_top_k` operation computes a `Tensor` of shape [batch_size]
  indicating whether each label is among the top `k` `predictions`;
  `update_op` then increments `total` with the reduced sum of `weights` where
  `in_top_k` is `True`, and increments `count` with the reduced sum of
  `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A float `Tensor` of dimension [batch_size, num_classes].
    labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,
      `int64`.
    k: The number of top elements to look at for computing recall.
    weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
      must be broadcastable to `labels` (i.e., all dimensions must be either
      `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `recall_at_k`
      should be added to.
    updates_collections: An optional list of collections `update_op` should be
      added to.
    name: An optional variable_scope name.

  Returns:
    recall_at_k: A `Tensor` representing the recall@k, the fraction of labels
      which fall into the top `k` predictions.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `recall_at_k`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  # 1.0 where the label is in the top-k predictions, 0.0 otherwise; recall@k
  # is then simply the weighted mean of these hits.
  hits = math_ops.to_float(nn.in_top_k(predictions, labels, k))
  return streaming_mean(hits, weights, metrics_collections,
                        updates_collections, name or _at_k_name('recall', k))
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_recall_at_k(predictions,
                                 labels,
                                 k,
                                 class_id=None,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes recall@k of the predictions with respect to sparse labels.

  If `class_id` is not specified, we'll calculate recall as the ratio of true
  positives (i.e., correct predictions, items in the top `k` highest
  `predictions` that are found in the corresponding row in `labels`) to
  actual positives (the full `labels` row).

  If `class_id` is specified, we calculate recall by considering only the rows
  in the batch for which `class_id` is in `labels`, and computing the
  fraction of them for which `class_id` is in the top `k` highest
  `predictions`.

  `streaming_sparse_recall_at_k` creates two local variables,
  `true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
  the recall_at_k frequency. This frequency is ultimately returned as
  `recall_at_<k>`: an idempotent operation that simply divides
  `true_positive_at_<k>` by total (`true_positive_at_<k>` +
  `false_negative_at_<k>`).

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false negatives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_negative_at_<k>` using these values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
      The final dimension contains the logit values for each class. [D1, ... DN]
      must match `labels`.
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
      Values should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range always count
      towards `false_negative_at_<k>`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If class_id is outside this range, the method returns NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
      by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  # Pure delegation to the core metric; only the argument order differs.
  return metrics.recall_at_k(
      k=k,
      class_id=class_id,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_k(predictions,
                                    labels,
                                    k,
                                    class_id=None,
                                    weights=None,
                                    metrics_collections=None,
                                    updates_collections=None,
                                    name=None):
  """Computes precision@k of the predictions with respect to sparse labels.

  If `class_id` is not specified, we calculate precision as the ratio of true
  positives (i.e., correct predictions, items in the top `k` highest
  `predictions` that are found in the corresponding row in `labels`) to
  positives (all top `k` `predictions`).

  If `class_id` is specified, we calculate precision by considering only the
  rows in the batch for which `class_id` is in the top `k` highest
  `predictions`, and computing the fraction of them for which `class_id` is
  in the corresponding row in `labels`.

  We expect precision to decrease as `k` increases.

  `streaming_sparse_precision_at_k` creates two local variables,
  `true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
  the precision@k frequency. This frequency is ultimately returned as
  `precision_at_<k>`: an idempotent operation that simply divides
  `true_positive_at_<k>` by total (`true_positive_at_<k>` +
  `false_positive_at_<k>`).

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
      The final dimension contains the logit values for each class. [D1, ... DN]
      must match `labels`.
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions`. Values should be in range [0, num_classes), where
      num_classes is the last dimension of `predictions`. Values outside this
      range are ignored.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
      `predictions`. If `class_id` is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  # Pure delegation to the core metric; only the argument order differs.
  return metrics.precision_at_k(
      k=k,
      class_id=class_id,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_top_k(top_k_predictions,
                                        labels,
                                        class_id=None,
                                        weights=None,
                                        metrics_collections=None,
                                        updates_collections=None,
                                        name=None):
  """Computes precision@k of top-k predictions with respect to sparse labels.

  Without `class_id`, precision is the fraction of all entries in
  `top_k_predictions` that also appear in the corresponding row of `labels`
  (true positives over all predicted positives). With `class_id`, only rows
  whose top-k predictions contain `class_id` are considered, and precision is
  the fraction of those rows whose `labels` also contain `class_id`. Expect
  precision to decrease as `k` increases.

  Two local variables, `true_positive_at_k` and `false_positive_at_k`, hold
  the running counts; the returned precision value idempotently divides
  `true_positive_at_k` by the sum of the two. For streaming evaluation, the
  returned `update_op` applies set operations to `top_k_predictions` and
  `labels` (weighted by `weights`) and increments both counters.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
      N >= 1, holding the indices of the top-k predicted classes. Commonly,
      N=1 and top_k_predictions has shape [batch size, k]. [D1, ... DN] must
      match `labels`.
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. [D1, ... DN] must match
      `top_k_predictions`. Values should be in range [0, num_classes); values
      outside this range are ignored.
    class_id: Optional integer class ID for binary metrics, expected to be in
      range [0, num_classes). If `class_id` is outside this range, the method
      returns NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, if either `metrics_collections` or `updates_collections`
      are not a list or tuple, or if `top_k_predictions` has rank < 2.
  """
  scope_default = _at_k_name('precision', class_id=class_id)
  with ops.name_scope(name, scope_default,
                      (top_k_predictions, labels, weights)) as scope:
    return metrics_impl.precision_at_top_k(
        labels=labels,
        predictions_idx=top_k_predictions,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
def sparse_recall_at_top_k(labels,
                           top_k_predictions,
                           class_id=None,
                           weights=None,
                           metrics_collections=None,
                           updates_collections=None,
                           name=None):
  """Computes recall@k of top-k predictions with respect to sparse labels.

  With `class_id`, recall is computed over only the batch entries whose
  `labels` contain `class_id`, as the fraction of those entries whose top-k
  predictions also contain `class_id`. Without `class_id`, it is how often,
  on average, a class among the labels of a batch entry appears in the top-k
  `predictions`.

  Two local variables, `true_positive_at_<k>` and `false_negative_at_<k>`,
  hold the running counts; the returned recall value idempotently divides
  `true_positive_at_<k>` by the sum of the two. For streaming evaluation, the
  returned `update_op` applies set operations to `top_k_predictions` and
  `labels` (weighted by `weights`) and increments both counters.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. [D1, ... DN] must match
      `top_k_predictions`. Values should be in range [0, num_classes); values
      outside this range always count towards `false_negative_at_<k>`.
    top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
      N >= 1, holding the indices of the top-k predicted classes. Commonly,
      N=1 and top_k_predictions has shape [batch size, k]. [D1, ... DN] must
      match `labels`.
    class_id: Optional integer class ID for binary metrics, expected to be in
      range [0, num_classes). If class_id is outside this range, the method
      returns NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
  """
  scope_default = _at_k_name('recall', class_id=class_id)
  with ops.name_scope(name, scope_default,
                      (top_k_predictions, labels, weights)) as scope:
    return metrics_impl.recall_at_top_k(
        labels=labels,
        predictions_idx=top_k_predictions,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
def _compute_recall_at_precision(tp, fp, fn, precision, name,
                                 strict_mode=False):
  """Helper function to compute recall at a given `precision`.

  `tp`, `fp` and `fn` are expected to be aligned vectors (one entry per
  threshold), as produced by `_streaming_confusion_matrix_at_thresholds`.

  Args:
    tp: The number of true positives.
    fp: The number of false positives.
    fn: The number of false negatives.
    precision: The precision for which the recall will be calculated.
    name: An optional variable_scope name.
    strict_mode: If true and there exists a threshold where the precision is
      no smaller than the target precision, return the corresponding recall at
      the threshold. Otherwise, return 0. If false, find the threshold where
      the precision is closest to the target precision and return the recall
      at the threshold.

  Returns:
    The recall at a given `precision`.
  """
  # Per-threshold precision; _EPSILON guards against division by zero when
  # tp + fp == 0 at a threshold.
  precisions = math_ops.div(tp, tp + fp + _EPSILON)
  if not strict_mode:
    # Pick the threshold whose precision is closest (in absolute difference)
    # to the target, whether above or below it.
    tf_index = math_ops.argmin(
        math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)
    # Now, we have the implicit threshold, so compute the recall:
    return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
                        name)
  else:
    # We aim to find the threshold where the precision is minimum but no smaller
    # than the target precision.
    # The rationale:
    # 1. Compute the difference between precisions (by different thresholds) and
    #   the target precision.
    # 2. Take the reciprocal of the values by the above step. The intention is
    #   to make the positive values rank before negative values and also the
    #   smaller positives rank before larger positives.
    tf_index = math_ops.argmax(
        math_ops.div(1.0, precisions - precision + _EPSILON),
        0,
        output_type=dtypes.int32)

    def _return_good_recall():
      # Recall at the selected threshold; _EPSILON again avoids 0/0.
      return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
                          name)

    # If even the best candidate's precision falls short of the target, report
    # recall 0. NOTE(review): the false branch returns a Python float, which
    # `cond` converts to a constant tensor — presumably matching the dtype of
    # the true branch; confirm if tp/fn are not float32.
    return control_flow_ops.cond(precisions[tf_index] >= precision,
                                 _return_good_recall, lambda: .0)
def recall_at_precision(labels,
                        predictions,
                        precision,
                        weights=None,
                        num_thresholds=200,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None,
                        strict_mode=False):
  """Computes `recall` at `precision`.

  Accumulates a confusion matrix (`tp`, `fp`, `fn` counts) at `num_thresholds`
  score thresholds and reports the recall at the threshold whose precision
  best matches the requested `precision` value (see `strict_mode` for the
  matching rule).

  For streaming evaluation, the returned `update_op` folds the
  `weights`-scaled counts of a new batch of `predictions` and `labels` into
  the confusion-matrix variables and evaluates to the updated recall.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    precision: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    num_thresholds: The number of thresholds to use for matching the given
      `precision`.
    metrics_collections: An optional list of collections that `recall`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.
    strict_mode: If true, return the recall at the threshold with the smallest
      precision that is still no smaller than the target `precision`, or 0 if
      no such threshold exists. If false, return the recall at the threshold
      whose precision is closest to the target.

  Returns:
    recall: A scalar `Tensor` representing the recall at the given
      `precision` value.
    update_op: An operation that increments the `tp`, `fp` and `fn`
      variables appropriately and whose value matches `recall`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `precision` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
  """
  if not 0 <= precision <= 1:
    raise ValueError('`precision` must be in the range [0, 1].')

  with variable_scope.variable_scope(name, 'recall_at_precision',
                                     (predictions, labels, weights)):
    # Evenly spaced interior thresholds on (0, 1), padded with
    # epsilon-shifted endpoints so scores of exactly 0.0 and 1.0 are covered.
    interior = [
        i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
    ]
    thresholds = [0.0 - _EPSILON] + interior + [1.0 + _EPSILON]

    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        predictions, labels, thresholds, weights)

    recall = _compute_recall_at_precision(values['tp'], values['fp'],
                                          values['fn'], precision, 'value',
                                          strict_mode)
    update_op = _compute_recall_at_precision(update_ops['tp'],
                                             update_ops['fp'],
                                             update_ops['fn'], precision,
                                             'update_op', strict_mode)

    if metrics_collections:
      ops.add_to_collections(metrics_collections, recall)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return recall, update_op
def precision_at_recall(labels,
                        predictions,
                        target_recall,
                        weights=None,
                        num_thresholds=200,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the precision at a given recall.

  This function creates variables to track the true positives, false
  positives, true negatives, and false negatives at a set of thresholds.
  Among those thresholds where recall is at least `target_recall`, precision
  is computed at the threshold where recall is closest to `target_recall`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  precision at `target_recall`. `update_op` increments the counts of true
  positives, false positives, true negatives, and false negatives with the
  weight of each case found in the `predictions` and `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  For additional information about precision and recall, see
  http://en.wikipedia.org/wiki/Precision_and_recall

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    target_recall: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    num_thresholds: The number of thresholds to use for matching the given
      recall.
    metrics_collections: An optional list of collections to which `precision`
      should be added.
    updates_collections: An optional list of collections to which `update_op`
      should be added.
    name: An optional variable_scope name.

  Returns:
    precision: A scalar `Tensor` representing the precision at the given
      `target_recall` value.
    update_op: An operation that increments the variables for tracking the
      true positives, false positives, true negatives, and false negatives
      and whose value matches `precision`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `target_recall` is not between 0 and 1, or if either
      `metrics_collections` or `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  # Graph-mode only: the metric relies on persistent local variables.
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision_at_recall is not '
                       'supported when eager execution is enabled.')
  if target_recall < 0 or target_recall > 1:
    raise ValueError('`target_recall` must be in the range [0, 1].')

  with variable_scope.variable_scope(name, 'precision_at_recall',
                                     (predictions, labels, weights)):
    kepsilon = 1e-7  # Used to avoid division by zero.
    # Evenly spaced interior thresholds, padded with epsilon-shifted endpoints
    # so scores of exactly 0.0 and 1.0 fall inside the threshold range.
    thresholds = [
        (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
    ]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]

    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        predictions, labels, thresholds, weights)

    def compute_precision_at_recall(tp, fp, fn, name):
      """Computes the precision at a given recall.

      `tp`, `fp` and `fn` are aligned per-threshold vectors.

      Args:
        tp: True positives.
        fp: False positives.
        fn: False negatives.
        name: A name for the operation.

      Returns:
        The precision at the desired recall.
      """
      recalls = math_ops.div(tp, tp + fn + kepsilon)
      # Because recall is monotone decreasing as a function of the threshold,
      # the smallest recall exceeding target_recall occurs at the largest
      # threshold where recall >= target_recall.
      admissible_recalls = math_ops.cast(
          math_ops.greater_equal(recalls, target_recall), dtypes.int64)
      # NOTE(review): if no threshold attains the target recall (e.g. no
      # positive labels, so every recall is ~0), this sum is 0 and tf_index
      # becomes -1, which indexes the *last* threshold — presumably intended,
      # but worth confirming.
      tf_index = math_ops.reduce_sum(admissible_recalls) - 1
      # Now we have the threshold at which to compute precision:
      return math_ops.div(tp[tf_index] + kepsilon,
                          tp[tf_index] + fp[tf_index] + kepsilon,
                          name)

    precision_value = compute_precision_at_recall(
        values['tp'], values['fp'], values['fn'], 'value')
    update_op = compute_precision_at_recall(
        update_ops['tp'], update_ops['fp'], update_ops['fn'], 'update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, precision_value)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return precision_value, update_op
def streaming_sparse_average_precision_at_k(predictions,
                                            labels,
                                            k,
                                            weights=None,
                                            metrics_collections=None,
                                            updates_collections=None,
                                            name=None):
  """Computes average precision@k of predictions with respect to sparse labels.

  See `sparse_average_precision_at_k` for details on formula. `weights` are
  applied to the result of `sparse_average_precision_at_k`.

  `streaming_sparse_average_precision_at_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the frequency. This frequency is ultimately returned as
  `average_precision_at_<k>`: an idempotent operation that simply divides
  `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and `predictions` has shape
      [batch size, num_classes]. The final dimension contains the logit values
      for each class. [D1, ... DN] must match `labels`.
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions`. Values should be in range [0, num_classes), where
      num_classes is the last dimension of `predictions`. Values outside this
      range are ignored.
    k: Integer, k for @k metric. This will calculate an average precision for
      range `[1,k]`, as documented above.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  """
  # Delegates to the core metric; this wrapper exists for contrib
  # backward-compatibility.
  return metrics.average_precision_at_k(
      k=k,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_sparse_average_precision_at_top_k(top_k_predictions,
                                                labels,
                                                weights=None,
                                                metrics_collections=None,
                                                updates_collections=None,
                                                name=None):
  """Computes average precision@k of predictions with respect to sparse labels.

  Two local variables, `average_precision_at_<k>/total` and
  `average_precision_at_<k>/max`, hold the running sums; the returned value
  idempotently divides the former by the latter. For streaming evaluation,
  the returned `update_op` applies set operations to `top_k` and `labels`
  to obtain `weights`-scaled true- and false-positive counts, and increments
  `true_positive_at_<k>` and `false_positive_at_<k>` accordingly.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
      N >= 1. Commonly, N=1 and `predictions_idx` has shape [batch size, k].
      The final dimension must be set and contains the top `k` predicted
      class indices. [D1, ... DN] must match `labels`. Values should be in
      range [0, num_classes).
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. [D1, ... DN] must match `top_k_predictions`.
      Values should be in range [0, num_classes).
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.

  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.

  Raises:
    ValueError: if the last dimension of top_k_predictions is not set.
  """
  return metrics_impl._streaming_sparse_average_precision_at_top_k(  # pylint: disable=protected-access
      labels=labels,
      predictions_idx=top_k_predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None,
            'Please switch to tf.metrics.mean_absolute_error. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_mean_absolute_error(predictions,
                                  labels,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes the mean absolute error between the labels and predictions.

  This is a streaming metric: two local variables, `total` and `count`,
  accumulate the `weights`-scaled absolute differences between `predictions`
  and `labels`, and the corresponding weight mass. The returned
  `mean_absolute_error` idempotently divides `total` by `count`, while
  `update_op` folds a new batch into both accumulators and evaluates to the
  updated mean.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A `Tensor` of arbitrary shape.
    labels: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` indicating the frequency with which an example
      is sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_absolute_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_absolute_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_absolute_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.mean_absolute_error(
      labels=labels,
      predictions=predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_mean_relative_error(predictions,
                                  labels,
                                  normalizer,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes the mean relative error by normalizing with the given values.

  This is a streaming metric: two local variables, `total` and `count`,
  accumulate the `weights`-scaled relative errors — the absolute differences
  between `predictions` and `labels`, divided by `normalizer` — and the
  corresponding weight mass. The returned `mean_relative_error` idempotently
  divides `total` by `count`, while `update_op` folds a new batch into both
  accumulators and evaluates to the updated mean.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A `Tensor` of arbitrary shape.
    labels: A `Tensor` of the same shape as `predictions`.
    normalizer: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` indicating the frequency with which an example
      is sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_relative_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_relative_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_relative_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.mean_relative_error(
      labels=labels,
      predictions=predictions,
      normalizer=normalizer,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(None,
            'Please switch to tf.metrics.mean_squared_error. Note that the '
            'order of the labels and predictions arguments has been switched.')
def streaming_mean_squared_error(predictions,
                                 labels,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes the mean squared error between the labels and predictions.

  This is a streaming metric: two local variables, `total` and `count`,
  accumulate the `weights`-scaled element-wise squared differences between
  `predictions` and `labels`, and the corresponding weight mass. The returned
  `mean_squared_error` idempotently divides `total` by `count`, while
  `update_op` folds a new batch into both accumulators and evaluates to the
  updated mean.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A `Tensor` of arbitrary shape.
    labels: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` indicating the frequency with which an example
      is sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_squared_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_squared_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.mean_squared_error(
      labels=labels,
      predictions=predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
@deprecated(
    None,
    'Please switch to tf.metrics.root_mean_squared_error. Note that the '
    'order of the labels and predictions arguments has been switched.')
def streaming_root_mean_squared_error(predictions,
                                      labels,
                                      weights=None,
                                      metrics_collections=None,
                                      updates_collections=None,
                                      name=None):
  """Computes the root mean squared error between the labels and predictions.

  This is a streaming metric: two local variables, `total` and `count`,
  accumulate the `weights`-scaled element-wise squared differences between
  `predictions` and `labels`, and the corresponding weight mass. The returned
  `root_mean_squared_error` idempotently takes the square root of `total`
  divided by `count`, while `update_op` folds a new batch into both
  accumulators and evaluates to the updated result.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A `Tensor` of arbitrary shape.
    labels: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` indicating the frequency with which an example
      is sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `root_mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    root_mean_squared_error: A `Tensor` representing the current mean, the
      value of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `root_mean_squared_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  return metrics.root_mean_squared_error(
      labels=labels,
      predictions=predictions,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
def streaming_covariance(predictions,
                         labels,
                         weights=None,
                         metrics_collections=None,
                         updates_collections=None,
                         name=None):
  """Computes the unbiased sample covariance between `predictions` and `labels`.

  The `streaming_covariance` function creates four local variables,
  `comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to
  compute the sample covariance between predictions and labels across multiple
  batches of data. The covariance is ultimately returned as an idempotent
  operation that simply divides `comoment` by `count` - 1. We use `count` - 1
  in order to get an unbiased estimate.

  The algorithm used for this online computation is described in
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.
  Specifically, the formula used to combine two sample comoments is
  `C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
  The comoment for a single batch of data is simply
  `sum((x - E[x]) * (y - E[y]))`, optionally weighted.

  If `weights` is not None, then it is used to compute weighted comoments,
  means, and count. NOTE: these weights are treated as "frequency weights", as
  opposed to "reliability weights". See discussion of the difference on
  https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance

  To facilitate the computation of covariance across multiple batches of data,
  the function creates an `update_op` operation, which updates underlying
  variables and returns the updated covariance.

  Args:
    predictions: A `Tensor` of arbitrary size.
    labels: A `Tensor` of the same size as `predictions`.
    weights: Optional `Tensor` indicating the frequency with which an example is
      sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    covariance: A `Tensor` representing the current unbiased sample covariance,
      `comoment` / (`count` - 1).
    update_op: An operation that updates the local variables appropriately.

  Raises:
    ValueError: If labels and predictions are of different sizes or if either
      `metrics_collections` or `updates_collections` are not a list or tuple.
  """
  with variable_scope.variable_scope(name, 'covariance',
                                     (predictions, labels, weights)):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions, labels, weights)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Running totals; in the merge formula above, the accumulated state is the
    # "A" sample and the incoming batch is the "B" sample.
    count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
    mean_prediction = metrics_impl.metric_variable(
        [], dtypes.float32, name='mean_prediction')
    mean_label = metrics_impl.metric_variable(
        [], dtypes.float32, name='mean_label')
    comoment = metrics_impl.metric_variable(  # C_A in update equation
        [], dtypes.float32, name='comoment')
    if weights is None:
      batch_count = math_ops.to_float(array_ops.size(labels))  # n_B in eqn
      weighted_predictions = predictions
      weighted_labels = labels
    else:
      # Frequency weights: each example contributes `weight` copies to the
      # counts and to the weighted sums used for the means below.
      weights = weights_broadcast_ops.broadcast_weights(weights, labels)
      batch_count = math_ops.reduce_sum(weights)  # n_B in eqn
      weighted_predictions = math_ops.multiply(predictions, weights)
      weighted_labels = math_ops.multiply(labels, weights)
    update_count = state_ops.assign_add(count_, batch_count)  # n_AB in eqn
    prev_count = update_count - batch_count  # n_A in update equation
    # We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
    # batch_mean_prediction is E[x_B] in the update equation
    batch_mean_prediction = _safe_div(
        math_ops.reduce_sum(weighted_predictions), batch_count,
        'batch_mean_prediction')
    delta_mean_prediction = _safe_div(
        (batch_mean_prediction - mean_prediction) * batch_count, update_count,
        'delta_mean_prediction')
    update_mean_prediction = state_ops.assign_add(mean_prediction,
                                                  delta_mean_prediction)
    # prev_mean_prediction is E[x_A] in the update equation
    prev_mean_prediction = update_mean_prediction - delta_mean_prediction
    # batch_mean_label is E[y_B] in the update equation
    batch_mean_label = _safe_div(
        math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')
    delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,
                                 update_count, 'delta_mean_label')
    update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
    # prev_mean_label is E[y_A] in the update equation
    prev_mean_label = update_mean_label - delta_mean_label
    unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *
                                    (labels - batch_mean_label))
    # batch_comoment is C_B in the update equation
    if weights is None:
      batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
    else:
      batch_comoment = math_ops.reduce_sum(
          unweighted_batch_coresiduals * weights)
    # View delta_comoment as = C_AB - C_A in the update equation above.
    # Since C_A is stored in a var, by how much do we need to increment that var
    # to make the var = C_AB?
    delta_comoment = (
        batch_comoment + (prev_mean_prediction - batch_mean_prediction) *
        (prev_mean_label - batch_mean_label) *
        (prev_count * batch_count / update_count))
    update_comoment = state_ops.assign_add(comoment, delta_comoment)
    # Covariance is undefined for fewer than two (weighted) examples, so NaN
    # is reported until the count exceeds one.
    covariance = array_ops.where(
        math_ops.less_equal(count_, 1.),
        float('nan'),
        math_ops.truediv(comoment, count_ - 1),
        name='covariance')
    # The update op reads `comoment`/`count_` only after this batch has been
    # folded in, so it reports the refreshed covariance.
    with ops.control_dependencies([update_comoment]):
      update_op = array_ops.where(
          math_ops.less_equal(count_, 1.),
          float('nan'),
          math_ops.truediv(comoment, count_ - 1),
          name='update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, covariance)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return covariance, update_op
def streaming_pearson_correlation(predictions,
                                  labels,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes Pearson correlation coefficient between `predictions`, `labels`.

  The `streaming_pearson_correlation` function delegates to
  `streaming_covariance` the tracking of three [co]variances:

  - `streaming_covariance(predictions, labels)`, i.e. covariance
  - `streaming_covariance(predictions, predictions)`, i.e. variance
  - `streaming_covariance(labels, labels)`, i.e. variance

  The product-moment correlation ultimately returned is an idempotent operation
  `cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To
  facilitate correlation computation across multiple batches, the function
  groups the `update_op`s of the underlying streaming_covariance and returns an
  `update_op`.

  If `weights` is not None, then it is used to compute a weighted correlation.
  NOTE: these weights are treated as "frequency weights", as opposed to
  "reliability weights". See discussion of the difference on
  https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance

  Args:
    predictions: A `Tensor` of arbitrary size.
    labels: A `Tensor` of the same size as predictions.
    weights: Optional `Tensor` indicating the frequency with which an example is
      sampled. Rank must be 0, or the same rank as `labels`, and must be
      broadcastable to `labels` (i.e., all dimensions must be either `1`, or
      the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    pearson_r: A `Tensor` representing the current Pearson product-moment
      correlation coefficient, the value of
      `cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.
    update_op: An operation that updates the underlying variables appropriately.

  Raises:
    ValueError: If `labels` and `predictions` are of different sizes, or if
      `weights` is the wrong size, or if either `metrics_collections` or
      `updates_collections` are not a `list` or `tuple`.
  """
  with variable_scope.variable_scope(name, 'pearson_r',
                                     (predictions, labels, weights)):
    predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
        predictions, labels, weights)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Broadcast weights here to avoid duplicate broadcasting in each call to
    # `streaming_covariance`.
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(weights, labels)
    cov, update_cov = streaming_covariance(
        predictions, labels, weights=weights, name='covariance')
    var_predictions, update_var_predictions = streaming_covariance(
        predictions, predictions, weights=weights, name='variance_predictions')
    var_labels, update_var_labels = streaming_covariance(
        labels, labels, weights=weights, name='variance_labels')
    # pearson_r = cov / sqrt(var_p * var_l). Note: if either variance is zero,
    # the float division below yields NaN/Inf for that evaluation.
    pearson_r = math_ops.truediv(
        cov,
        math_ops.multiply(
            math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
        name='pearson_r')
    # Evaluating `update_op` refreshes all three accumulators (each
    # `update_*` tensor is the post-update value) and then recomputes the
    # correlation from the refreshed values.
    update_op = math_ops.truediv(
        update_cov,
        math_ops.multiply(
            math_ops.sqrt(update_var_predictions),
            math_ops.sqrt(update_var_labels)),
        name='update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, pearson_r)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return pearson_r, update_op
# TODO(nsilberman): add a 'normalized' flag so that the user can request
# normalization if the inputs are not normalized.
def streaming_mean_cosine_distance(predictions,
                                   labels,
                                   dim,
                                   weights=None,
                                   metrics_collections=None,
                                   updates_collections=None,
                                   name=None):
  """Computes the cosine distance between the labels and predictions.

  The `streaming_mean_cosine_distance` function creates two local variables,
  `total` and `count` that are used to compute the average cosine distance
  between `predictions` and `labels`. This average is weighted by `weights`,
  and it is ultimately returned as `mean_distance`, which is an idempotent
  operation that simply divides `total` by `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_distance`.

  NOTE: The distance is computed as `1 - sum(predictions * labels)` along
  `dim`. This equals the cosine distance only when both inputs are already
  unit-normalized along `dim`; no normalization is performed here.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    predictions: A `Tensor` of the same shape as `labels`.
    labels: A `Tensor` of arbitrary shape.
    dim: The dimension along which the cosine distance is computed.
    weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
      and whose dimension `dim` is 1.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_distance: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  predictions, labels, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
      predictions, labels, weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  # Element-wise product summed along `dim` is the dot product, i.e. the
  # cosine similarity for unit-normalized inputs.
  radial_diffs = math_ops.multiply(predictions, labels)
  radial_diffs = math_ops.reduce_sum(
      radial_diffs, reduction_indices=[
          dim,
      ], keepdims=True)
  mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,
                                            name or 'mean_cosine_distance')
  # Convert mean similarity into mean distance: distance = 1 - similarity.
  mean_distance = math_ops.subtract(1.0, mean_distance)
  update_op = math_ops.subtract(1.0, update_op)
  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean_distance)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return mean_distance, update_op
def streaming_percentage_less(values,
                              threshold,
                              weights=None,
                              metrics_collections=None,
                              updates_collections=None,
                              name=None):
  """Computes the fraction of `values` that fall below `threshold`.

  Two local variables, `total` and `count`, accumulate how many of the
  (optionally `weights`-weighted) `values` lie under `threshold`. The
  idempotent `percentage` result is simply `total` divided by `count`, and
  the returned `update_op` folds a new batch of data into those variables
  before reporting the refreshed value.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A numeric `Tensor` of arbitrary size.
    threshold: A scalar threshold.
    weights: An optional `Tensor` whose shape is broadcastable to `values`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    percentage: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  # Thin compatibility shim: all of the work happens in
  # `tf.metrics.percentage_below`.
  kwargs = dict(
      values=values,
      threshold=threshold,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
  return metrics.percentage_below(**kwargs)
def streaming_mean_iou(predictions,
                       labels,
                       num_classes,
                       weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Calculates per-step mean Intersection-Over-Union (mIOU).

  mIOU is a standard evaluation metric for semantic image segmentation: the
  IOU of every semantic class is computed first, and the per-class results
  are then averaged. For a single class,

    IOU = true_positive / (true_positive + false_positive + false_negative).

  Predictions are accumulated in a confusion matrix, weighted by `weights`,
  and mIOU is then derived from that matrix. The returned `update_op` folds
  each new batch into the confusion matrix.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened, if its rank > 1.
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened, if its rank > 1.
    num_classes: The possible number of labels the prediction task can have.
      This value must be provided, since a confusion matrix of dimension =
      [num_classes, num_classes] will be allocated.
    weights: An optional `Tensor` whose shape is broadcastable to
      `predictions`.
    metrics_collections: An optional list of collections that `mean_iou`
      should be added to.
    updates_collections: An optional list of collections `update_op` should
      be added to.
    name: An optional variable_scope name.

  Returns:
    mean_iou: A `Tensor` representing the mean intersection-over-union.
    update_op: An operation that increments the confusion matrix.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  # Thin compatibility shim: all of the work happens in `tf.metrics.mean_iou`.
  kwargs = dict(
      num_classes=num_classes,
      predictions=predictions,
      labels=labels,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
  return metrics.mean_iou(**kwargs)
def _next_array_size(required_size, growth_factor=1.5):
  """Calculate the next size for reallocating a dynamic array.

  Picks the smallest integral power of `growth_factor` that is at least
  `required_size`, so that repeated reallocation keeps appends amortized
  constant time.

  Args:
    required_size: number or tf.Tensor specifying required array capacity.
    growth_factor: optional number or tf.Tensor specifying the growth factor
      between subsequent allocations.

  Returns:
    tf.Tensor with dtype=int32 giving the next array size.
  """
  # Smallest integer exponent e with growth_factor**e >= required_size.
  log_required = math_ops.log(math_ops.cast(required_size, dtypes.float32))
  log_growth = math_ops.log(math_ops.cast(growth_factor, dtypes.float32))
  exponent = math_ops.ceil(log_required / log_growth)
  return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
def streaming_concat(values,
                     axis=0,
                     max_size=None,
                     metrics_collections=None,
                     updates_collections=None,
                     name=None):
  """Concatenate values along an axis across batches.

  The function `streaming_concat` creates two local variables, `array` and
  `size`, that are used to store concatenated values. Internally, `array` is
  used as storage for a dynamic array (if `maxsize` is `None`), which ensures
  that updates can be run in amortized constant time.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that appends the values of a tensor and returns the
  length of the concatenated axis.

  This op allows for evaluating metrics that cannot be updated incrementally
  using the same framework as other streaming metrics.

  Args:
    values: `Tensor` to concatenate. Rank and the shape along all axes other
      than the axis to concatenate along must be statically known.
    axis: optional integer axis to concatenate along.
    max_size: optional integer maximum size of `value` along the given axis.
      Once the maximum size is reached, further updates are no-ops. By default,
      there is no maximum size: the array is resized as necessary.
    metrics_collections: An optional list of collections that `value`
      should be added to.
    updates_collections: An optional list of collections `update_op` should be
      added to.
    name: An optional variable_scope name.

  Returns:
    value: A `Tensor` representing the concatenated values.
    update_op: An operation that concatenates the next values.

  Raises:
    ValueError: if `values` does not have a statically known rank, `axis` is
      not in the valid range or the size of `values` is not statically known
      along any axis other than `axis`.
  """
  with variable_scope.variable_scope(name, 'streaming_concat', (values,)):
    # pylint: disable=invalid-slice-index
    values_shape = values.get_shape()
    if values_shape.dims is None:
      # Fixed message (previously read "must have known statically known").
      raise ValueError('`values` must have statically known rank')
    ndim = len(values_shape)
    if axis < 0:
      axis += ndim
    if not 0 <= axis < ndim:
      raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))
    fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]
    if any(value is None for value in fixed_shape):
      raise ValueError('all dimensions of `values` other than the dimension to '
                       'concatenate along must have statically known size')
    # We move `axis` to the front of the internal array so assign ops can be
    # applied to contiguous slices
    init_size = 0 if max_size is None else max_size
    init_shape = [init_size] + fixed_shape
    array = metrics_impl.metric_variable(
        init_shape, values.dtype, validate_shape=False, name='array')
    size = metrics_impl.metric_variable([], dtypes.int32, name='size')
    # `perm` undoes the axis-to-front transposition for the reported value.
    perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
    valid_array = array[:size]
    valid_array.set_shape([None] + fixed_shape)
    value = array_ops.transpose(valid_array, perm, name='concat')
    values_size = array_ops.shape(values)[axis]
    if max_size is None:
      batch_size = values_size
    else:
      # Clamp so appends become no-ops once `max_size` is reached.
      batch_size = math_ops.minimum(values_size, max_size - size)
    perm = [axis] + [n for n in range(ndim) if n != axis]
    batch_values = array_ops.transpose(values, perm)[:batch_size]
    def reallocate():
      # NOTE: closes over `new_size`, which is assigned below (before the
      # cond that may invoke this function), so the reference is valid.
      next_size = _next_array_size(new_size)
      next_shape = array_ops.stack([next_size] + fixed_shape)
      new_value = array_ops.zeros(next_shape, dtype=values.dtype)
      old_value = array.value()
      assign_op = state_ops.assign(array, new_value, validate_shape=False)
      with ops.control_dependencies([assign_op]):
        copy_op = array[:size].assign(old_value[:size])
      # return value needs to be the same dtype as no_op() for cond
      with ops.control_dependencies([copy_op]):
        return control_flow_ops.no_op()
    new_size = size + batch_size
    array_size = array_ops.shape_internal(array, optimize=False)[0]
    # Grow the backing storage geometrically only when the batch won't fit.
    maybe_reallocate_op = control_flow_ops.cond(
        new_size > array_size, reallocate, control_flow_ops.no_op)
    with ops.control_dependencies([maybe_reallocate_op]):
      append_values_op = array[size:new_size].assign(batch_values)
    with ops.control_dependencies([append_values_op]):
      update_op = size.assign(new_size)
    if metrics_collections:
      ops.add_to_collections(metrics_collections, value)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return value, update_op
    # pylint: enable=invalid-slice-index
def aggregate_metrics(*value_update_tuples):
  """Splits (value_tensor, update_op) pairs into two parallel lists.

  Args:
    *value_update_tuples: a variable number of tuples, each of which contain
      the pair of (value_tensor, update_op) from a streaming metric.

  Returns:
    A list of value `Tensor` objects and a list of update ops.

  Raises:
    ValueError: if `value_update_tuples` is empty.
  """
  if not value_update_tuples:
    raise ValueError('Expected at least one value_tensor/update_op pair')
  value_ops = [value_op for value_op, _ in value_update_tuples]
  update_ops = [update_op for _, update_op in value_update_tuples]
  return value_ops, update_ops
def aggregate_metric_map(names_to_tuples):
  """Aggregates the metric names to tuple dictionary.

  This function is useful for pairing metric names with their associated value
  and update ops when the list of metrics is long. For example:

  ```python
  metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
      'Mean Absolute Error': new_slim.metrics.streaming_mean_absolute_error(
          predictions, labels, weights),
      'Mean Relative Error': new_slim.metrics.streaming_mean_relative_error(
          predictions, labels, labels, weights),
      'RMSE Linear': new_slim.metrics.streaming_root_mean_squared_error(
          predictions, labels, weights),
      'RMSE Log': new_slim.metrics.streaming_root_mean_squared_error(
          predictions, labels, weights),
  })
  ```

  Args:
    names_to_tuples: a map of metric names to tuples, each of which contain the
      pair of (value_tensor, update_op) from a streaming metric.

  Returns:
    A dictionary from metric names to value ops and a dictionary from metric
    names to update ops.
  """
  # Build both result maps in one pass. Unlike the previous
  # `zip(*names_to_tuples.values())` approach, this also handles an empty
  # input map (returning two empty dicts) instead of failing with an opaque
  # unpacking ValueError.
  value_map = {}
  update_map = {}
  for metric_name, (value_op, update_op) in names_to_tuples.items():
    value_map[metric_name] = value_op
    update_map[metric_name] = update_op
  return value_map, update_map
def count(values,
          weights=None,
          metrics_collections=None,
          updates_collections=None,
          name=None):
  """Computes the number of examples, or sum of `weights`.

  This metric keeps track of the denominator in `tf.metrics.mean`.
  When evaluating some metric (e.g. mean) on one or more subsets of the data,
  this auxiliary metric is useful for keeping track of how many examples there
  are in each subset.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    values: A `Tensor` of arbitrary dimensions. Only its shape is used.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    count: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the metric from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.contrib.metrics.count is not supported when eager '
                       'execution is enabled.')
  with variable_scope.variable_scope(name, 'count', (values, weights)):
    count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
    if weights is None:
      # Unweighted: every element of `values` contributes 1 to the count.
      num_values = math_ops.to_float(array_ops.size(values))
    else:
      # Weighted: the count increment is the sum of the broadcast weights.
      values = math_ops.to_float(values)
      values, _, weights = metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
          predictions=values,
          labels=None,
          weights=weights)
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_float(weights), values)
      num_values = math_ops.reduce_sum(weights)
    # Depend on `values` so the counter is only bumped after the batch has
    # actually been computed (errors in `values` then propagate here).
    with ops.control_dependencies([values]):
      update_count_op = state_ops.assign_add(count_, num_values)
    count_ = metrics_impl._aggregate_variable(count_, metrics_collections)  # pylint: disable=protected-access
    if updates_collections:
      ops.add_to_collections(updates_collections, update_count_op)
    return count_, update_count_op
def cohen_kappa(labels,
                predictions_idx,
                num_classes,
                weights=None,
                metrics_collections=None,
                updates_collections=None,
                name=None):
  """Calculates Cohen's kappa.

  [Cohen's kappa](https://en.wikipedia.org/wiki/Cohen's_kappa) is a statistic
  that measures inter-annotator agreement.

  The `cohen_kappa` function calculates the confusion matrix, and creates three
  local variables to compute the Cohen's kappa: `po`, `pe_row`, and `pe_col`,
  which refer to the diagonal part, rows and columns totals of the confusion
  matrix, respectively. This value is ultimately returned as `kappa`, an
  idempotent operation that is calculated by

      pe = (pe_row * pe_col) / N
      k = (sum(po) - sum(pe)) / (N - sum(pe))

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `kappa`. `update_op` weights each prediction by the corresponding value in
  `weights`.

  Class labels are expected to start at 0. E.g., if `num_classes`
  was three, then the possible labels would be [0, 1, 2].

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  NOTE: Equivalent to `sklearn.metrics.cohen_kappa_score`, but the method
  doesn't support weighted matrix yet.

  Args:
    labels: 1-D `Tensor` of real labels for the classification task. Must be
      one of the following types: int16, int32, int64.
    predictions_idx: 1-D `Tensor` of predicted class indices for a given
      classification. Must have the same type as `labels`.
    num_classes: The possible number of labels.
    weights: Optional `Tensor` whose shape matches `predictions`.
    metrics_collections: An optional list of collections that `kappa` should
      be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.

  Returns:
    kappa: Scalar float `Tensor` representing the current Cohen's kappa.
    update_op: `Operation` that increments `po`, `pe_row` and `pe_col`
      variables appropriately and whose value matches `kappa`.

  Raises:
    ValueError: If `num_classes` is less than 2, or `predictions` and `labels`
      have mismatched shapes, or if `weights` is not `None` and its shape
      doesn't match `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.contrib.metrics.cohen_kappa is not supported '
                       'when eager execution is enabled.')
  if num_classes < 2:
    # Trailing space added so the concatenated message reads
    # "... must be >= 2. Found: N" instead of "... >= 2.Found: N".
    raise ValueError('`num_classes` must be >= 2. '
                     'Found: {}'.format(num_classes))
  with variable_scope.variable_scope(name, 'cohen_kappa',
                                     (labels, predictions_idx, weights)):
    # Convert 2-dim (num, 1) to 1-dim (num,)
    labels.get_shape().with_rank_at_most(2)
    if labels.get_shape().ndims == 2:
      labels = array_ops.squeeze(labels, axis=[-1])
    predictions_idx, labels, weights = (
        metrics_impl._remove_squeezable_dimensions(  # pylint: disable=protected-access
            predictions=predictions_idx,
            labels=labels,
            weights=weights))
    predictions_idx.get_shape().assert_is_compatible_with(labels.get_shape())
    # Accumulate exact integer counts when possible (unweighted or integer
    # weights), otherwise fall back to float accumulators.
    stat_dtype = (
        dtypes.int64
        if weights is None or weights.dtype.is_integer else dtypes.float32)
    po = metrics_impl.metric_variable((num_classes,), stat_dtype, name='po')
    pe_row = metrics_impl.metric_variable(
        (num_classes,), stat_dtype, name='pe_row')
    pe_col = metrics_impl.metric_variable(
        (num_classes,), stat_dtype, name='pe_col')
    # Table of the counts of agreement:
    counts_in_table = confusion_matrix.confusion_matrix(
        labels,
        predictions_idx,
        num_classes=num_classes,
        weights=weights,
        dtype=stat_dtype,
        name='counts_in_table')
    # Diagonal of the confusion matrix: per-class observed agreement.
    po_t = array_ops.diag_part(counts_in_table)
    # Marginal totals of the confusion matrix, one per axis.
    pe_row_t = math_ops.reduce_sum(counts_in_table, axis=0)
    pe_col_t = math_ops.reduce_sum(counts_in_table, axis=1)
    update_po = state_ops.assign_add(po, po_t)
    update_pe_row = state_ops.assign_add(pe_row, pe_row_t)
    update_pe_col = state_ops.assign_add(pe_col, pe_col_t)
    def _calculate_k(po, pe_row, pe_col, name):
      """Computes kappa from agreement counts and marginal totals."""
      po_sum = math_ops.reduce_sum(po)
      total = math_ops.reduce_sum(pe_row)
      pe_sum = math_ops.reduce_sum(
          metrics_impl._safe_div(  # pylint: disable=protected-access
              pe_row * pe_col, total, None))
      po_sum, pe_sum, total = (math_ops.to_double(po_sum),
                               math_ops.to_double(pe_sum),
                               math_ops.to_double(total))
      # kappa = (po - pe) / (N - pe)
      k = metrics_impl._safe_scalar_div(  # pylint: disable=protected-access
          po_sum - pe_sum,
          total - pe_sum,
          name=name)
      return k
    kappa = _calculate_k(po, pe_row, pe_col, name='value')
    # The `update_*` tensors hold the post-assign values, so this reports
    # kappa after the current batch has been folded in.
    update_op = _calculate_k(
        update_po, update_pe_row, update_pe_col, name='update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, kappa)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return kappa, update_op
# Public names re-exported when this module is imported with `import *`.
__all__ = [
    'auc_with_confidence_intervals',
    'aggregate_metric_map',
    'aggregate_metrics',
    'cohen_kappa',
    'count',
    'precision_recall_at_equal_thresholds',
    'recall_at_precision',
    'sparse_recall_at_top_k',
    'streaming_accuracy',
    'streaming_auc',
    'streaming_curve_points',
    'streaming_dynamic_auc',
    'streaming_false_negative_rate',
    'streaming_false_negative_rate_at_thresholds',
    'streaming_false_negatives',
    'streaming_false_negatives_at_thresholds',
    'streaming_false_positive_rate',
    'streaming_false_positive_rate_at_thresholds',
    'streaming_false_positives',
    'streaming_false_positives_at_thresholds',
    'streaming_mean',
    'streaming_mean_absolute_error',
    'streaming_mean_cosine_distance',
    'streaming_mean_iou',
    'streaming_mean_relative_error',
    'streaming_mean_squared_error',
    'streaming_mean_tensor',
    'streaming_percentage_less',
    'streaming_precision',
    'streaming_precision_at_thresholds',
    'streaming_recall',
    'streaming_recall_at_k',
    'streaming_recall_at_thresholds',
    'streaming_root_mean_squared_error',
    'streaming_sensitivity_at_specificity',
    'streaming_sparse_average_precision_at_k',
    'streaming_sparse_average_precision_at_top_k',
    'streaming_sparse_precision_at_k',
    'streaming_sparse_precision_at_top_k',
    'streaming_sparse_recall_at_k',
    'streaming_specificity_at_sensitivity',
    'streaming_true_negatives',
    'streaming_true_negatives_at_thresholds',
    'streaming_true_positives',
    'streaming_true_positives_at_thresholds',
]
|
GNOME/chronojump | refs/heads/master | dev-utils/monodevelop_fix_resources.py | 1 | #!/usr/bin/env python3
import xml.etree.ElementTree
import re
"""
When developing ChronoJump if a new resource image is added it has to be added in the Makefiles
(this is what it's used for compiling for the release, etc.).
If a developer wants to use MonoDevelop: the file has to be added in MonoDevelop as well doing:
-Open the project in MonoDevelop
-Right click and press Add -> Add Files...
-Select the file and if asked leave it in the same directory as now
-Look for the file, right click, Build Action: select Embedded Resource (if multiple files have been added:
it's possible to select multiple files)
-Now there are two options:
-In the properties name: change the "Resource ID" to the filename (it would be images/file_name.png). If only
one file has been added this is enough
-If many files have been added: this script can be executed to fix it
How to do it better? The Makefile files could generate the MonoDevelop Solution files. All the information is there.
"""
def fix_resources_local_names():
    """Add a <LogicalName> child to every EmbeddedResource of the MonoDevelop
    project file so the resource id matches the Makefile naming: the
    ``Include`` path without its leading ``images\\`` component, with ``/``
    separators (e.g. ``images\\mini\\chronopic.png`` -> ``mini/chronopic.png``).

    Reads ../chronojump.csproj and writes the result to new_chronojump.csproj;
    the user is expected to copy it back manually.
    """
    original_file = "../chronojump.csproj"
    print("Will read {} (if the project have changed: better close MonoDevelop to make sure that all is saved)".format(original_file))
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(original_file, 'r') as project_file:
        project_string = project_file.read()
    # Removes the xmlns declaration to avoid ElementTree prepending the
    # namespace to every tag. I didn't find a better solution.
    match = re.search(' (xmlns="([^"]+)")', project_string)
    assert match, "no xmlns declaration found in {}".format(original_file)
    xmlns_declaration = match.group(1)  # the full attribute, e.g. xmlns="http://..."
    xmlns_uri = match.group(2)          # just the namespace URI
    # str.replace instead of re.sub: the URI may contain regex metacharacters.
    project_xml = project_string.replace(' ' + xmlns_declaration, '', 1)
    # Reads the string. fromstring returns Element, it converts it to ElementTree.
    element_tree = xml.etree.ElementTree.ElementTree(xml.etree.ElementTree.fromstring(project_xml))
    # Restores the xmlns. Bug fix: the attribute value must be the URI alone,
    # not the whole 'xmlns="uri"' text (that serialized as a double declaration).
    element_tree.getroot().attrib['xmlns'] = xmlns_uri
    for embedded_resource in element_tree.findall("./ItemGroup/EmbeddedResource"):
        attributes = embedded_resource.attrib
        if 'Include' not in attributes:
            # It's not our type of resource
            continue
        if embedded_resource.find("LogicalName") is not None:
            # Already has a LogicalName: leave the element untouched.
            # Bug fix: the original only skipped the child node, so it still
            # appended one duplicate LogicalName per *other* child, and added
            # nothing at all for childless (self-closing) elements.
            continue
        # Uses the Include name (e.g. images\mini\chronopic.png) to build the
        # LogicalName (mini/chronopic.png): drop the first path component.
        new_path = "/".join(attributes['Include'].split("\\")[1:])
        logical_name = xml.etree.ElementTree.Element("LogicalName")
        logical_name.text = new_path
        # Adds the new Element (exactly once per EmbeddedResource).
        embedded_resource.append(logical_name)
    new_file = "new_chronojump.csproj"
    print("Saved the file to {}. Close MonoDevelop and Copy it manually to {}".format(new_file, original_file))
    element_tree.write(new_file)
if __name__ == "__main__":
    # Entry point: rewrite the MonoDevelop project file so every embedded
    # resource gets a LogicalName matching the Makefile resource id.
    fix_resources_local_names()
111pontes/ydk-py | refs/heads/master | cisco-ios-xe/ydk/models/cisco_ios_xe/Cisco_IOS_XE_mpls_fwd_oper.py | 1 | """ Cisco_IOS_XE_mpls_fwd_oper
This module contains a collection of YANG definitions for
monitoring MPLS forwarding\-table entries in a Network Element.

Copyright (c) 2016\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
# NOTE(review): this class appears to be auto-generated by ydk-gen from the
# Cisco-IOS-XE-mpls-fwd-oper YANG model; only documentation is added here.
class MplsForwardingTable(object):
    """
    Data nodes for MPLS forwarding table entries.

    .. attribute:: local_label_entry

        The list of MPLS forwarding table entries
        **type**\: list of :py:class:`LocalLabelEntry <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper.MplsForwardingTable.LocalLabelEntry>`

    """

    # YANG module prefix and revision this model was generated from.
    _prefix = 'mpls-fwd-ios-xe-oper'
    _revision = '2017-02-07'

    def __init__(self):
        # YList keeps each list entry linked back to this parent node.
        self.local_label_entry = YList()
        self.local_label_entry.parent = self
        self.local_label_entry.name = 'local_label_entry'


    class LocalLabelEntry(object):
        """
        The list of MPLS forwarding table entries.

        .. attribute:: local_label  <key>

            Value of local\-label
            **type**\: int

            **range:** 0..4294967295

        .. attribute:: forwarding_info

            **type**\: list of :py:class:`ForwardingInfo <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper.MplsForwardingTable.LocalLabelEntry.ForwardingInfo>`

        """

        _prefix = 'mpls-fwd-ios-xe-oper'
        _revision = '2017-02-07'

        def __init__(self):
            self.parent = None
            # List key: the locally assigned MPLS label value.
            self.local_label = None
            self.forwarding_info = YList()
            self.forwarding_info.parent = self
            self.forwarding_info.name = 'forwarding_info'


        class ForwardingInfo(object):
            """
            .. attribute:: outgoing_interface  <key>

                The name of the outgoing interface. Example possible values are 1.none, 2.drop, 3.<tunnel\-name>, 4.<interface\-name>, 5.aggregate/<vrf\-name> etc
                **type**\: one of the below types:

                **type**\: :py:class:`OutgoingInterfaceEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper.MplsForwardingTable.LocalLabelEntry.ForwardingInfo.OutgoingInterfaceEnum>`

                ----
                **type**\: str

                ----
            .. attribute:: connection_info

                The Prefix or tunnel\-id info corresponding to this label. Ex\: 1) for l2ckt, a number tunnel\-id value. 2) for ipv4, a prefix with [V] tag (113.113.113.113/32[V]). 3) for TE, a pefix with [T] tag (113.113.113.113/32[T])
                **type**\: :py:class:`ConnectionInfo <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper.MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo>`

            .. attribute:: label_switched_bytes

                The number of bytes switched using this label
                **type**\: int

                **range:** 0..18446744073709551615

            .. attribute:: next_hop

                Next hop information. Example possible values are 1.point2point, 2.<ip\-address>
                **type**\: one of the below types:

                **type**\: :py:class:`NextHopEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper.MplsForwardingTable.LocalLabelEntry.ForwardingInfo.NextHopEnum>`

                ----
                **type**\: str

                **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?

                ----
                **type**\: str

                **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?

                ----
                **type**\: str

                **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}

                ----
            .. attribute:: outgoing_label

                Value of outgoing\-label if exists or the type of non\-present label
                **type**\: one of the below types:

                **type**\: int

                **range:** 0..4294967295

                ----
                **type**\: :py:class:`OutgoingLabelEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper.MplsForwardingTable.LocalLabelEntry.ForwardingInfo.OutgoingLabelEnum>`

                ----
            """

            _prefix = 'mpls-fwd-ios-xe-oper'
            _revision = '2017-02-07'

            def __init__(self):
                self.parent = None
                # List key within a local-label entry.
                self.outgoing_interface = None
                self.connection_info = MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo()
                self.connection_info.parent = self
                self.label_switched_bytes = None
                self.next_hop = None
                self.outgoing_label = None

            class NextHopEnum(Enum):
                """
                NextHopEnum

                Next hop information.
                Example possible values are
                1.point2point,
                2.<ip\-address>

                .. data:: point2point = 0

                """

                point2point = 0

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
                    return meta._meta_table['MplsForwardingTable.LocalLabelEntry.ForwardingInfo.NextHopEnum']


            class OutgoingInterfaceEnum(Enum):
                """
                OutgoingInterfaceEnum

                The name of the outgoing interface.
                Example possible values are 1.none, 2.drop, 3.<tunnel\-name>,
                4.<interface\-name>, 5.aggregate/<vrf\-name> etc.

                .. data:: drop = 0

                .. data:: punt = 1

                .. data:: aggregate = 2

                .. data:: exception = 3

                .. data:: none = 4

                """

                drop = 0

                punt = 1

                aggregate = 2

                exception = 3

                none = 4

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
                    return meta._meta_table['MplsForwardingTable.LocalLabelEntry.ForwardingInfo.OutgoingInterfaceEnum']


            class OutgoingLabelEnum(Enum):
                """
                OutgoingLabelEnum

                Value of outgoing\-label if exists or
                the type of non\-present label.

                .. data:: no_label = 0

                .. data:: pop_label = 1

                .. data:: aggregate = 2

                .. data:: explicit_null = 3

                .. data:: illegal = 4

                """

                no_label = 0

                pop_label = 1

                aggregate = 2

                explicit_null = 3

                illegal = 4

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
                    return meta._meta_table['MplsForwardingTable.LocalLabelEntry.ForwardingInfo.OutgoingLabelEnum']


            class ConnectionInfo(object):
                """
                The Prefix or tunnel\-id info corresponding to this label.
                Ex\: 1) for l2ckt, a number tunnel\-id value.
                2) for ipv4, a prefix with [V] tag (113.113.113.113/32[V]).
                3) for TE, a pefix with [T] tag (113.113.113.113/32[T])

                .. attribute:: ip

                    **type**\: one of the below types:

                    **type**\: str

                    **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?

                    ----
                    **type**\: str

                    **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?

                    ----
                .. attribute:: l2ckt_id

                    **type**\: int

                    **range:** 0..4294967295

                .. attribute:: mask

                    **type**\: int

                    **range:** 0..65535

                .. attribute:: nh_id

                    **type**\: int

                    **range:** 0..4294967295

                .. attribute:: tunnel_id

                    **type**\: int

                    **range:** 0..4294967295

                .. attribute:: tunnel_tp

                    **type**\: :py:class:`TunnelTp <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper.MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TunnelTp>`

                .. attribute:: type

                    The type of connection represented by this label
                    **type**\: :py:class:`TypeEnum <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper.MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TypeEnum>`

                .. attribute:: vrf_id

                    **type**\: int

                    **range:** 0..4294967295

                """

                _prefix = 'mpls-fwd-ios-xe-oper'
                _revision = '2017-02-07'

                def __init__(self):
                    self.parent = None
                    self.ip = None
                    self.l2ckt_id = None
                    self.mask = None
                    self.nh_id = None
                    self.tunnel_id = None
                    self.tunnel_tp = MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TunnelTp()
                    self.tunnel_tp.parent = self
                    self.type = None
                    self.vrf_id = None

                class TypeEnum(Enum):
                    """
                    TypeEnum

                    The type of connection represented by this label

                    .. data:: ip = 0

                    .. data:: tunnel = 1

                    .. data:: nh_id = 2

                    .. data:: l2ckt = 3

                    .. data:: ip_vrf = 4

                    .. data:: none = 5

                    .. data:: tunnel_tp = 6

                    """

                    ip = 0

                    tunnel = 1

                    nh_id = 2

                    l2ckt = 3

                    ip_vrf = 4

                    none = 5

                    tunnel_tp = 6

                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
                        return meta._meta_table['MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TypeEnum']


                class TunnelTp(object):
                    """
                    .. attribute:: dst_id

                        **type**\: :py:class:`DstId <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper.MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TunnelTp.DstId>`

                    .. attribute:: src_id

                        **type**\: :py:class:`SrcId <ydk.models.cisco_ios_xe.Cisco_IOS_XE_mpls_fwd_oper.MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TunnelTp.SrcId>`

                    .. attribute:: tunnel

                        **type**\: int

                        **range:** 0..4294967295

                    """

                    _prefix = 'mpls-fwd-ios-xe-oper'
                    _revision = '2017-02-07'

                    def __init__(self):
                        self.parent = None
                        self.dst_id = MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TunnelTp.DstId()
                        self.dst_id.parent = self
                        self.src_id = MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TunnelTp.SrcId()
                        self.src_id.parent = self
                        self.tunnel = None

                    class SrcId(object):
                        """
                        .. attribute:: global_

                            **type**\: int

                            **range:** 0..4294967295

                        .. attribute:: node

                            **type**\: one of the below types:

                            **type**\: str

                            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?

                            ----
                            **type**\: str

                            **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?

                            ----
                        """

                        _prefix = 'mpls-fwd-ios-xe-oper'
                        _revision = '2017-02-07'

                        def __init__(self):
                            self.parent = None
                            self.global_ = None
                            self.node = None

                        @property
                        def _common_path(self):
                            # Absolute schema path of this node in the data tree.
                            if self.parent is None:
                                raise YPYModelError('parent is not set . Cannot derive path.')

                            return self.parent._common_path +'/Cisco-IOS-XE-mpls-fwd-oper:src-id'

                        def is_config(self):
                            ''' Returns True if this instance represents config data else returns False '''
                            return False

                        def _has_data(self):
                            # True when any leaf of this node carries data.
                            if self.global_ is not None:
                                return True

                            if self.node is not None:
                                return True

                            return False

                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
                            return meta._meta_table['MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TunnelTp.SrcId']['meta_info']


                    class DstId(object):
                        """
                        .. attribute:: global_

                            **type**\: int

                            **range:** 0..4294967295

                        .. attribute:: node

                            **type**\: one of the below types:

                            **type**\: str

                            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?

                            ----
                            **type**\: str

                            **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?

                            ----
                        """

                        _prefix = 'mpls-fwd-ios-xe-oper'
                        _revision = '2017-02-07'

                        def __init__(self):
                            self.parent = None
                            self.global_ = None
                            self.node = None

                        @property
                        def _common_path(self):
                            # Absolute schema path of this node in the data tree.
                            if self.parent is None:
                                raise YPYModelError('parent is not set . Cannot derive path.')

                            return self.parent._common_path +'/Cisco-IOS-XE-mpls-fwd-oper:dst-id'

                        def is_config(self):
                            ''' Returns True if this instance represents config data else returns False '''
                            return False

                        def _has_data(self):
                            # True when any leaf of this node carries data.
                            if self.global_ is not None:
                                return True

                            if self.node is not None:
                                return True

                            return False

                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
                            return meta._meta_table['MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TunnelTp.DstId']['meta_info']

                    @property
                    def _common_path(self):
                        # Absolute schema path of this node in the data tree.
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')

                        return self.parent._common_path +'/Cisco-IOS-XE-mpls-fwd-oper:tunnel-tp'

                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return False

                    def _has_data(self):
                        # True when any leaf or child container carries data.
                        if self.dst_id is not None and self.dst_id._has_data():
                            return True

                        if self.src_id is not None and self.src_id._has_data():
                            return True

                        if self.tunnel is not None:
                            return True

                        return False

                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
                        return meta._meta_table['MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo.TunnelTp']['meta_info']

                @property
                def _common_path(self):
                    # Absolute schema path of this node in the data tree.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')

                    return self.parent._common_path +'/Cisco-IOS-XE-mpls-fwd-oper:connection-info'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    # True when any leaf or child container carries data.
                    if self.ip is not None:
                        return True

                    if self.l2ckt_id is not None:
                        return True

                    if self.mask is not None:
                        return True

                    if self.nh_id is not None:
                        return True

                    if self.tunnel_id is not None:
                        return True

                    if self.tunnel_tp is not None and self.tunnel_tp._has_data():
                        return True

                    if self.type is not None:
                        return True

                    if self.vrf_id is not None:
                        return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
                    return meta._meta_table['MplsForwardingTable.LocalLabelEntry.ForwardingInfo.ConnectionInfo']['meta_info']

            @property
            def _common_path(self):
                # Keyed list entry: the path embeds the key value.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                if self.outgoing_interface is None:
                    raise YPYModelError('Key property outgoing_interface is None')

                return self.parent._common_path +'/Cisco-IOS-XE-mpls-fwd-oper:forwarding-info[Cisco-IOS-XE-mpls-fwd-oper:outgoing-interface = ' + str(self.outgoing_interface) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                # True when any leaf or child container carries data.
                if self.outgoing_interface is not None:
                    return True

                if self.connection_info is not None and self.connection_info._has_data():
                    return True

                if self.label_switched_bytes is not None:
                    return True

                if self.next_hop is not None:
                    return True

                if self.outgoing_label is not None:
                    return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
                return meta._meta_table['MplsForwardingTable.LocalLabelEntry.ForwardingInfo']['meta_info']

        @property
        def _common_path(self):
            # Top-level keyed list entry: absolute path embeds the key value.
            if self.local_label is None:
                raise YPYModelError('Key property local_label is None')

            return '/Cisco-IOS-XE-mpls-fwd-oper:mpls-forwarding-table/Cisco-IOS-XE-mpls-fwd-oper:local-label-entry[Cisco-IOS-XE-mpls-fwd-oper:local-label = ' + str(self.local_label) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when the key or any child entry carries data.
            if self.local_label is not None:
                return True

            if self.forwarding_info is not None:
                for child_ref in self.forwarding_info:
                    if child_ref._has_data():
                        return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
            return meta._meta_table['MplsForwardingTable.LocalLabelEntry']['meta_info']

    @property
    def _common_path(self):
        # Root container of the model: fixed absolute path.
        return '/Cisco-IOS-XE-mpls-fwd-oper:mpls-forwarding-table'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when any local-label entry carries data.
        if self.local_label_entry is not None:
            for child_ref in self.local_label_entry:
                if child_ref._has_data():
                    return True

        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_mpls_fwd_oper as meta
        return meta._meta_table['MplsForwardingTable']['meta_info']
|
3nids/QGIS | refs/heads/master | scripts/qgis_fixes/fix_nonzero.py | 77 | from lib2to3.fixes.fix_nonzero import FixNonzero
|
manishpatell/erpcustomizationssaiimpex123qwe | refs/heads/master | addons/fleet/fleet.py | 266 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import time
import datetime
from openerp import tools
from openerp.osv.orm import except_orm
from openerp.tools.translate import _
from dateutil.relativedelta import relativedelta
def str_to_datetime(strdate):
    """Parse a server-format date string (DEFAULT_SERVER_DATE_FORMAT,
    i.e. ``%Y-%m-%d``) into a ``datetime.datetime``."""
    return datetime.datetime.strptime(strdate, tools.DEFAULT_SERVER_DATE_FORMAT)
class fleet_vehicle_cost(osv.Model):
    """Any cost attached to a vehicle (contract, service, fuel, other).

    Costs form a tree (``parent_id``/``cost_ids``) so that a contract cost can
    include the services it covers.
    """
    _name = 'fleet.vehicle.cost'
    _description = 'Cost related to a vehicle'
    _order = 'date desc, vehicle_id asc'

    def _get_odometer(self, cr, uid, ids, odometer_id, arg, context):
        """Function field getter: odometer value linked to each cost
        (False when no odometer log is attached)."""
        res = dict.fromkeys(ids, False)
        for record in self.browse(cr,uid,ids,context=context):
            if record.odometer_id:
                res[record.id] = record.odometer_id.value
        return res

    def _set_odometer(self, cr, uid, id, name, value, args=None, context=None):
        """Function field setter: create a fleet.vehicle.odometer log for the
        cost's vehicle (dated at the cost date, or today) and link it.

        :raises except_orm: when ``value`` is falsy — erasing is forbidden.
        """
        if not value:
            raise except_orm(_('Operation not allowed!'), _('Emptying the odometer value of a vehicle is not allowed.'))
        date = self.browse(cr, uid, id, context=context).date
        if not(date):
            # No cost date: fall back to the user's "today".
            date = fields.date.context_today(self, cr, uid, context=context)
        vehicle_id = self.browse(cr, uid, id, context=context).vehicle_id
        data = {'value': value, 'date': date, 'vehicle_id': vehicle_id.id}
        odometer_id = self.pool.get('fleet.vehicle.odometer').create(cr, uid, data, context=context)
        return self.write(cr, uid, id, {'odometer_id': odometer_id}, context=context)

    _columns = {
        'name': fields.related('vehicle_id', 'name', type="char", string='Name', store=True),
        'vehicle_id': fields.many2one('fleet.vehicle', 'Vehicle', required=True, help='Vehicle concerned by this log'),
        'cost_subtype_id': fields.many2one('fleet.service.type', 'Type', help='Cost type purchased with this cost'),
        'amount': fields.float('Total Price'),
        'cost_type': fields.selection([('contract', 'Contract'), ('services','Services'), ('fuel','Fuel'), ('other','Other')], 'Category of the cost', help='For internal purpose only', required=True),
        'parent_id': fields.many2one('fleet.vehicle.cost', 'Parent', help='Parent cost to this current cost'),
        'cost_ids': fields.one2many('fleet.vehicle.cost', 'parent_id', 'Included Services'),
        'odometer_id': fields.many2one('fleet.vehicle.odometer', 'Odometer', help='Odometer measure of the vehicle at the moment of this log'),
        'odometer': fields.function(_get_odometer, fnct_inv=_set_odometer, type='float', string='Odometer Value', help='Odometer measure of the vehicle at the moment of this log'),
        'odometer_unit': fields.related('vehicle_id', 'odometer_unit', type="char", string="Unit", readonly=True),
        'date' :fields.date('Date',help='Date when the cost has been executed'),
        'contract_id': fields.many2one('fleet.vehicle.log.contract', 'Contract', help='Contract attached to this cost'),
        'auto_generated': fields.boolean('Automatically Generated', readonly=True, required=True),
    }

    _defaults ={
        'cost_type': 'other',
    }

    def create(self, cr, uid, data, context=None):
        """Override: propagate vehicle/date/type from the parent cost or the
        linked contract so child costs stay consistent with them."""
        #make sure that the data are consistent with values of parent and contract records given
        if 'parent_id' in data and data['parent_id']:
            parent = self.browse(cr, uid, data['parent_id'], context=context)
            data['vehicle_id'] = parent.vehicle_id.id
            data['date'] = parent.date
            data['cost_type'] = parent.cost_type
        if 'contract_id' in data and data['contract_id']:
            contract = self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, data['contract_id'], context=context)
            data['vehicle_id'] = contract.vehicle_id.id
            data['cost_subtype_id'] = contract.cost_subtype_id.id
            data['cost_type'] = contract.cost_type
        if 'odometer' in data and not data['odometer']:
            #if received value for odometer is 0, then remove it from the data as it would result to the creation of a
            #odometer log with 0, which is to be avoided
            del(data['odometer'])
        return super(fleet_vehicle_cost, self).create(cr, uid, data, context=context)
class fleet_vehicle_tag(osv.Model):
    """Free-form tag that can be attached to vehicles."""
    _name = 'fleet.vehicle.tag'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
    }
class fleet_vehicle_state(osv.Model):
    """Lifecycle state of a vehicle (e.g. active), ordered by sequence."""
    _name = 'fleet.vehicle.state'
    _order = 'sequence asc'
    _columns = {
        'name': fields.char('Name', required=True),
        'sequence': fields.integer('Sequence', help="Used to order the note stages")
    }
    # State names must be globally unique.
    _sql_constraints = [('fleet_state_name_unique','unique(name)', 'State name already exists')]
class fleet_vehicle_model(osv.Model):
    """Vehicle model, attached to a brand; images are related to the brand's."""

    def _model_name_get_fnc(self, cr, uid, ids, field_name, arg, context=None):
        """Getter for the computed 'name' field: "<brand> / <model name>",
        or just the model name when the brand has no name."""
        res = {}
        for record in self.browse(cr, uid, ids, context=context):
            name = record.modelname
            if record.brand_id.name:
                name = record.brand_id.name + ' / ' + name
            res[record.id] = name
        return res

    def on_change_brand(self, cr, uid, ids, model_id, context=None):
        """on_change handler: mirror the selected brand's logo in the form.

        NOTE(review): despite its name, ``model_id`` receives a *brand* id
        here — it is browsed on fleet.vehicle.model.brand. Confirm against
        the form views before renaming.
        """
        if not model_id:
            return {'value': {'image_medium': False}}
        brand = self.pool.get('fleet.vehicle.model.brand').browse(cr, uid, model_id, context=context)
        return {
            'value': {
                'image_medium': brand.image,
            }
        }

    _name = 'fleet.vehicle.model'
    _description = 'Model of a vehicle'
    _order = 'name asc'
    _columns = {
        'name': fields.function(_model_name_get_fnc, type="char", string='Name', store=True),
        'modelname': fields.char('Model name', required=True),
        'brand_id': fields.many2one('fleet.vehicle.model.brand', 'Model Brand', required=True, help='Brand of the vehicle'),
        'vendors': fields.many2many('res.partner', 'fleet_vehicle_model_vendors', 'model_id', 'partner_id', string='Vendors'),
        'image': fields.related('brand_id', 'image', type="binary", string="Logo"),
        'image_medium': fields.related('brand_id', 'image_medium', type="binary", string="Logo (medium)"),
        'image_small': fields.related('brand_id', 'image_small', type="binary", string="Logo (small)"),
    }
class fleet_vehicle_model_brand(osv.Model):
    """Vehicle brand; stores the logo plus auto-resized medium/small copies."""
    _name = 'fleet.vehicle.model.brand'
    _description = 'Brand model of the vehicle'
    _order = 'name asc'

    def _get_image(self, cr, uid, ids, name, args, context=None):
        """Multi getter: resized (medium/small) versions of the brand logo."""
        result = dict.fromkeys(ids, False)
        for obj in self.browse(cr, uid, ids, context=context):
            result[obj.id] = tools.image_get_resized_images(obj.image)
        return result

    def _set_image(self, cr, uid, id, name, value, args, context=None):
        """Setter: store any written image back into 'image', capped to the
        big size; the resized variants are recomputed from it."""
        return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)

    _columns = {
        'name': fields.char('Brand Name', required=True),
        'image': fields.binary("Logo",
            help="This field holds the image used as logo for the brand, limited to 1024x1024px."),
        'image_medium': fields.function(_get_image, fnct_inv=_set_image,
            string="Medium-sized photo", type="binary", multi="_get_image",
            store = {
                'fleet.vehicle.model.brand': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Medium-sized logo of the brand. It is automatically "\
                 "resized as a 128x128px image, with aspect ratio preserved. "\
                 "Use this field in form views or some kanban views."),
        'image_small': fields.function(_get_image, fnct_inv=_set_image,
            string="Smal-sized photo", type="binary", multi="_get_image",
            store = {
                'fleet.vehicle.model.brand': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Small-sized photo of the brand. It is automatically "\
                 "resized as a 64x64px image, with aspect ratio preserved. "\
                 "Use this field anywhere a small image is required."),
    }
class fleet_vehicle(osv.Model):
_inherit = 'mail.thread'
def _vehicle_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = record.model_id.brand_id.name + '/' + record.model_id.modelname + ' / ' + record.license_plate
return res
def return_action_to_open(self, cr, uid, ids, context=None):
""" This opens the xml view specified in xml_id for the current vehicle """
if context is None:
context = {}
if context.get('xml_id'):
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid ,'fleet', context['xml_id'], context=context)
res['context'] = context
res['context'].update({'default_vehicle_id': ids[0]})
res['domain'] = [('vehicle_id','=', ids[0])]
return res
return False
def act_show_log_cost(self, cr, uid, ids, context=None):
""" This opens log view to view and add new log for this vehicle, groupby default to only show effective costs
@return: the costs log view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid ,'fleet','fleet_vehicle_costs_act', context=context)
res['context'] = context
res['context'].update({
'default_vehicle_id': ids[0],
'search_default_parent_false': True
})
res['domain'] = [('vehicle_id','=', ids[0])]
return res
def _get_odometer(self, cr, uid, ids, odometer_id, arg, context):
res = dict.fromkeys(ids, 0)
for record in self.browse(cr,uid,ids,context=context):
ids = self.pool.get('fleet.vehicle.odometer').search(cr, uid, [('vehicle_id', '=', record.id)], limit=1, order='value desc')
if len(ids) > 0:
res[record.id] = self.pool.get('fleet.vehicle.odometer').browse(cr, uid, ids[0], context=context).value
return res
def _set_odometer(self, cr, uid, id, name, value, args=None, context=None):
if value:
date = fields.date.context_today(self, cr, uid, context=context)
data = {'value': value, 'date': date, 'vehicle_id': id}
return self.pool.get('fleet.vehicle.odometer').create(cr, uid, data, context=context)
    def _search_get_overdue_contract_reminder(self, cr, uid, obj, name, args, context):
        """Search function for the 'contract_renewal_overdue' virtual field.

        Translates each (field, operator, True/False) leaf of ``args`` into a
        domain on vehicle ids: the vehicles having at least one open/toclose
        contract whose expiration date is already past.
        """
        res = []
        for field, operator, value in args:
            # Only boolean equality tests make sense for this virtual field.
            assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
            # "= True" and "!= False" both select matching vehicles;
            # the other two combinations select the complement.
            if (operator == '=' and value == True) or (operator in ('<>', '!=') and value == False):
                search_operator = 'in'
            else:
                search_operator = 'not in'
            today = fields.date.context_today(self, cr, uid, context=context)
            # Raw SQL: vehicles with at least one open/toclose contract whose
            # expiration date is strictly before today.
            cr.execute('select cost.vehicle_id, count(contract.id) as contract_number FROM fleet_vehicle_cost cost left join fleet_vehicle_log_contract contract on contract.cost_id = cost.id WHERE contract.expiration_date is not null AND contract.expiration_date < %s AND contract.state IN (\'open\', \'toclose\') GROUP BY cost.vehicle_id', (today,))
            res_ids = [x[0] for x in cr.fetchall()]
            res.append(('id', search_operator, res_ids))
        return res
    def _search_contract_renewal_due_soon(self, cr, uid, obj, name, args, context):
        """Search function for the 'contract_renewal_due_soon' virtual field.

        Same operator translation as _search_get_overdue_contract_reminder,
        but selects vehicles whose open/toclose contracts expire within the
        next 15 days (strictly after today).
        """
        res = []
        for field, operator, value in args:
            # Only boolean equality tests make sense for this virtual field.
            assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
            if (operator == '=' and value == True) or (operator in ('<>', '!=') and value == False):
                search_operator = 'in'
            else:
                search_operator = 'not in'
            today = fields.date.context_today(self, cr, uid, context=context)
            datetime_today = datetime.datetime.strptime(today, tools.DEFAULT_SERVER_DATE_FORMAT)
            # Upper bound of the "due soon" window: today + 15 days.
            limit_date = str((datetime_today + relativedelta(days=+15)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT))
            cr.execute('select cost.vehicle_id, count(contract.id) as contract_number FROM fleet_vehicle_cost cost left join fleet_vehicle_log_contract contract on contract.cost_id = cost.id WHERE contract.expiration_date is not null AND contract.expiration_date > %s AND contract.expiration_date < %s AND contract.state IN (\'open\', \'toclose\') GROUP BY cost.vehicle_id', (today, limit_date))
            res_ids = [x[0] for x in cr.fetchall()]
            res.append(('id', search_operator, res_ids))
        return res
    def _get_contract_reminder_fnc(self, cr, uid, ids, field_names, unknow_none, context=None):
        """Multi getter for the contract-renewal function fields: whether any
        contract is overdue or due within 15 days, how many besides the first
        one, and the name of the oldest such contract's cost subtype."""
        res = {}
        for record in self.browse(cr, uid, ids, context=context):
            overdue = False
            due_soon = False
            total = 0
            name = ''
            for element in record.log_contracts:
                # Only running contracts with an expiration date matter.
                if element.state in ('open', 'toclose') and element.expiration_date:
                    current_date_str = fields.date.context_today(self, cr, uid, context=context)
                    due_time_str = element.expiration_date
                    current_date = str_to_datetime(current_date_str)
                    due_time = str_to_datetime(due_time_str)
                    diff_time = (due_time-current_date).days
                    if diff_time < 0:
                        # Expired in the past: overdue.
                        overdue = True
                        total += 1
                    if diff_time < 15 and diff_time >= 0:
                        # Expires within the next 15 days: due soon.
                        due_soon = True;
                        total += 1
            if overdue or due_soon:
                # NOTE(review): 'ids' is rebound here, shadowing the method
                # parameter; harmless because the browse above was already
                # evaluated, but worth renaming eventually.
                ids = self.pool.get('fleet.vehicle.log.contract').search(cr,uid,[('vehicle_id', '=', record.id), ('state', 'in', ('open', 'toclose'))], limit=1, order='expiration_date asc')
                if len(ids) > 0:
                    #we display only the name of the oldest overdue/due soon contract
                    name=(self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, ids[0], context=context).cost_subtype_id.name)
            res[record.id] = {
                'contract_renewal_overdue': overdue,
                'contract_renewal_due_soon': due_soon,
                'contract_renewal_total': (total - 1), #we remove 1 from the real total for display purposes
                'contract_renewal_name': name,
            }
        return res
def _get_default_state(self, cr, uid, context):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'vehicle_state_active')
except ValueError:
model_id = False
return model_id
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
    """Multi function field: per-vehicle record counts for the stat buttons.

    Returns, for every vehicle id, the number of odometer logs, fuel logs,
    service logs, contracts and (top-level) costs linked to it.
    """
    pool = self.pool
    counters = {}
    for vehicle_id in ids:
        vehicle_domain = [('vehicle_id', '=', vehicle_id)]
        counters[vehicle_id] = {
            'odometer_count': pool['fleet.vehicle.odometer'].search_count(cr, uid, vehicle_domain, context=context),
            'fuel_logs_count': pool['fleet.vehicle.log.fuel'].search_count(cr, uid, vehicle_domain, context=context),
            'service_count': pool['fleet.vehicle.log.services'].search_count(cr, uid, vehicle_domain, context=context),
            'contract_count': pool['fleet.vehicle.log.contract'].search_count(cr, uid, vehicle_domain, context=context),
            # only root costs: generated sub-costs have a parent_id set
            'cost_count': pool['fleet.vehicle.cost'].search_count(cr, uid, vehicle_domain + [('parent_id', '=', False)], context=context),
        }
    return counters
# Model registration and field definitions for the fleet.vehicle model.
_name = 'fleet.vehicle'
_description = 'Information on a vehicle'
_order= 'license_plate asc'
_columns = {
    # computed display name (model + license plate), see _vehicle_name_get_fnc
    'name': fields.function(_vehicle_name_get_fnc, type="char", string='Name', store=True),
    'company_id': fields.many2one('res.company', 'Company'),
    'license_plate': fields.char('License Plate', required=True, help='License plate number of the vehicle (ie: plate number for a car)'),
    'vin_sn': fields.char('Chassis Number', help='Unique number written on the vehicle motor (VIN/SN number)', copy=False),
    'driver_id': fields.many2one('res.partner', 'Driver', help='Driver of the vehicle'),
    'model_id': fields.many2one('fleet.vehicle.model', 'Model', required=True, help='Model of the vehicle'),
    'log_fuel': fields.one2many('fleet.vehicle.log.fuel', 'vehicle_id', 'Fuel Logs'),
    'log_services': fields.one2many('fleet.vehicle.log.services', 'vehicle_id', 'Services Logs'),
    'log_contracts': fields.one2many('fleet.vehicle.log.contract', 'vehicle_id', 'Contracts'),
    # the five counters below are all computed in one pass by _count_all (multi)
    'cost_count': fields.function(_count_all, type='integer', string="Costs" , multi=True),
    'contract_count': fields.function(_count_all, type='integer', string='Contracts', multi=True),
    'service_count': fields.function(_count_all, type='integer', string='Services', multi=True),
    'fuel_logs_count': fields.function(_count_all, type='integer', string='Fuel Logs', multi=True),
    'odometer_count': fields.function(_count_all, type='integer', string='Odometer', multi=True),
    'acquisition_date': fields.date('Acquisition Date', required=False, help='Date when the vehicle has been bought'),
    'color': fields.char('Color', help='Color of the vehicle'),
    'state_id': fields.many2one('fleet.vehicle.state', 'State', help='Current state of the vehicle', ondelete="set null"),
    'location': fields.char('Location', help='Location of the vehicle (garage, ...)'),
    'seats': fields.integer('Seats Number', help='Number of seats of the vehicle'),
    'doors': fields.integer('Doors Number', help='Number of doors of the vehicle'),
    'tag_ids' :fields.many2many('fleet.vehicle.tag', 'fleet_vehicle_vehicle_tag_rel', 'vehicle_tag_id','tag_id', 'Tags', copy=False),
    # reads/writes the latest fleet.vehicle.odometer record for this vehicle
    'odometer': fields.function(_get_odometer, fnct_inv=_set_odometer, type='float', string='Last Odometer', help='Odometer measure of the vehicle at the moment of this log'),
    'odometer_unit': fields.selection([('kilometers', 'Kilometers'),('miles','Miles')], 'Odometer Unit', help='Unit of the odometer ',required=True),
    'transmission': fields.selection([('manual', 'Manual'), ('automatic', 'Automatic')], 'Transmission', help='Transmission Used by the vehicle'),
    'fuel_type': fields.selection([('gasoline', 'Gasoline'), ('diesel', 'Diesel'), ('electric', 'Electric'), ('hybrid', 'Hybrid')], 'Fuel Type', help='Fuel Used by the vehicle'),
    'horsepower': fields.integer('Horsepower'),
    'horsepower_tax': fields.float('Horsepower Taxation'),
    'power': fields.integer('Power', help='Power in kW of the vehicle'),
    'co2': fields.float('CO2 Emissions', help='CO2 emissions of the vehicle'),
    # vehicle images are delegated to the model (brand logo)
    'image': fields.related('model_id', 'image', type="binary", string="Logo"),
    'image_medium': fields.related('model_id', 'image_medium', type="binary", string="Logo (medium)"),
    'image_small': fields.related('model_id', 'image_small', type="binary", string="Logo (small)"),
    # contract-renewal indicators, all computed by _get_contract_reminder_fnc (multi)
    'contract_renewal_due_soon': fields.function(_get_contract_reminder_fnc, fnct_search=_search_contract_renewal_due_soon, type="boolean", string='Has Contracts to renew', multi='contract_info'),
    'contract_renewal_overdue': fields.function(_get_contract_reminder_fnc, fnct_search=_search_get_overdue_contract_reminder, type="boolean", string='Has Contracts Overdued', multi='contract_info'),
    'contract_renewal_name': fields.function(_get_contract_reminder_fnc, type="text", string='Name of contract to renew soon', multi='contract_info'),
    'contract_renewal_total': fields.function(_get_contract_reminder_fnc, type="integer", string='Total of contracts due or overdue minus one', multi='contract_info'),
    'car_value': fields.float('Car Value', help='Value of the bought vehicle'),
}
_defaults = {
    'doors': 5,
    'odometer_unit': 'kilometers',
    'state_id': _get_default_state,
}
def on_change_model(self, cr, uid, ids, model_id, context=None):
    """On model change, refresh the vehicle image from the selected model."""
    if not model_id:
        return {}
    model_rec = self.pool.get('fleet.vehicle.model').browse(cr, uid, model_id, context=context)
    return {'value': {'image_medium': model_rec.image}}
def create(self, cr, uid, data, context=None):
    """Create the vehicle and greet it in the chatter.

    mail_create_nolog suppresses the default "document created" log note
    so that only our custom welcome message is posted.
    """
    context = dict(context or {}, mail_create_nolog=True)
    new_id = super(fleet_vehicle, self).create(cr, uid, data, context=context)
    record = self.browse(cr, uid, new_id, context=context)
    message = _('%s %s has been added to the fleet!') % (record.model_id.name, record.license_plate)
    self.message_post(cr, uid, [new_id], body=message, context=context)
    return new_id
def write(self, cr, uid, ids, vals, context=None):
    """Override to log important changes in the vehicle's chatter.

    Posts a message whenever the model, driver, state or license plate of a
    vehicle is about to change, then delegates to the standard write.

    :return: result of the standard ``write`` (True on success)
    """
    for vehicle in self.browse(cr, uid, ids, context):
        changes = []
        if 'model_id' in vals and vehicle.model_id.id != vals['model_id']:
            value = self.pool.get('fleet.vehicle.model').browse(cr, uid, vals['model_id'], context=context).name
            oldmodel = vehicle.model_id.name or _('None')
            changes.append(_("Model: from '%s' to '%s'") % (oldmodel, value))
        if 'driver_id' in vals and vehicle.driver_id.id != vals['driver_id']:
            value = self.pool.get('res.partner').browse(cr, uid, vals['driver_id'], context=context).name
            olddriver = vehicle.driver_id.name or _('None')
            changes.append(_("Driver: from '%s' to '%s'") % (olddriver, value))
        if 'state_id' in vals and vehicle.state_id.id != vals['state_id']:
            value = self.pool.get('fleet.vehicle.state').browse(cr, uid, vals['state_id'], context=context).name
            oldstate = vehicle.state_id.name or _('None')
            changes.append(_("State: from '%s' to '%s'") % (oldstate, value))
        if 'license_plate' in vals and vehicle.license_plate != vals['license_plate']:
            old_license_plate = vehicle.license_plate or _('None')
            changes.append(_("License Plate: from '%s' to '%s'") % (old_license_plate, vals['license_plate']))
        if changes:
            self.message_post(cr, uid, [vehicle.id], body=", ".join(changes), context=context)
    # Return the standard write's result instead of discarding it into an
    # unused local and hard-coding True.
    return super(fleet_vehicle, self).write(cr, uid, ids, vals, context)
class fleet_vehicle_odometer(osv.Model):
    """Odometer measurement log attached to a vehicle."""
    _name='fleet.vehicle.odometer'
    _description='Odometer log for a vehicle'
    _order='date desc'
    def _vehicle_log_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        """Display name: '<vehicle> / <date>', using whichever parts exist."""
        res = {}
        for record in self.browse(cr, uid, ids, context=context):
            name = record.vehicle_id.name
            if not name:
                name = record.date
            elif record.date:
                name += ' / '+ record.date
            res[record.id] = name
        return res
    def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
        """Propagate the selected vehicle's odometer unit to this log line."""
        if not vehicle_id:
            return {}
        odometer_unit = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context).odometer_unit
        return {
            'value': {
                'unit': odometer_unit,
            }
        }
    _columns = {
        'name': fields.function(_vehicle_log_name_get_fnc, type="char", string='Name', store=True),
        'date': fields.date('Date'),
        # group_operator="max": aggregated views show the highest (latest) reading
        'value': fields.float('Odometer Value', group_operator="max"),
        'vehicle_id': fields.many2one('fleet.vehicle', 'Vehicle', required=True),
        'unit': fields.related('vehicle_id', 'odometer_unit', type="char", string="Unit", readonly=True),
    }
    _defaults = {
        'date': fields.date.context_today,
    }
class fleet_vehicle_log_fuel(osv.Model):
    """Fuel (refueling) log entries; each entry is also a vehicle cost."""

    def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
        """Default the odometer unit and purchaser from the selected vehicle."""
        if not vehicle_id:
            return {}
        vehicle = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context)
        odometer_unit = vehicle.odometer_unit
        driver = vehicle.driver_id.id
        return {
            'value': {
                'odometer_unit': odometer_unit,
                'purchaser_id': driver,
            }
        }

    def _recompute_fuel_amounts(self, liter, price_per_liter, amount, priority):
        """Shared logic of the liter/price/amount onchange handlers.

        Keeps ``liter * price_per_liter == amount`` consistent by recomputing
        the first field in *priority* whose two inputs are both positive and
        whose current value differs (at 2 decimals) from the recomputed one.

        Inputs are cast to float because the web client may send integers
        (JavaScript and JSON make no difference between 3 and 3.0), and
        results are rounded to 2 decimals so the three intertwined onchanges
        reach a fixed point instead of triggering each other forever.

        :param priority: ordered iterable of field names among
                         'amount', 'liter', 'price_per_liter'
        :return: onchange dict ``{'value': {field: new_value}}``, or ``{}``
                 when nothing needs updating
        """
        liter = float(liter)
        price_per_liter = float(price_per_liter)
        amount = float(amount)
        for field in priority:
            if field == 'amount' and liter > 0 and price_per_liter > 0:
                value = round(liter * price_per_liter, 2)
                if value != amount:
                    return {'value': {'amount': value}}
            elif field == 'liter' and amount > 0 and price_per_liter > 0:
                value = round(amount / price_per_liter, 2)
                if value != liter:
                    return {'value': {'liter': value}}
            elif field == 'price_per_liter' and amount > 0 and liter > 0:
                value = round(amount / liter, 2)
                if value != price_per_liter:
                    return {'value': {'price_per_liter': value}}
        return {}

    def on_change_liter(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
        """Liter changed: recompute the total first, then the price per liter."""
        return self._recompute_fuel_amounts(liter, price_per_liter, amount,
                                            ('amount', 'price_per_liter', 'liter'))

    def on_change_price_per_liter(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
        """Price per liter changed: recompute the total first, then the liters."""
        return self._recompute_fuel_amounts(liter, price_per_liter, amount,
                                            ('amount', 'liter', 'price_per_liter'))

    def on_change_amount(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
        """Total changed: recompute the price per liter first, then the liters."""
        return self._recompute_fuel_amounts(liter, price_per_liter, amount,
                                            ('price_per_liter', 'liter', 'amount'))

    def _get_default_service_type(self, cr, uid, context):
        """Default cost subtype: the 'Refueling' service, or False if absent."""
        try:
            model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_service_refueling')
        except ValueError:
            model_id = False
        return model_id

    _name = 'fleet.vehicle.log.fuel'
    _description = 'Fuel log for vehicles'
    _inherits = {'fleet.vehicle.cost': 'cost_id'}
    _columns = {
        'liter': fields.float('Liter'),
        'price_per_liter': fields.float('Price Per Liter'),
        'purchaser_id': fields.many2one('res.partner', 'Purchaser', domain="['|',('customer','=',True),('employee','=',True)]"),
        'inv_ref': fields.char('Invoice Reference', size=64),
        'vendor_id': fields.many2one('res.partner', 'Supplier', domain="[('supplier','=',True)]"),
        'notes': fields.text('Notes'),
        'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
        'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related with store=True because the graph view doesn't support (1) to address fields from inherited table and (2) fields that aren't stored in database
    }
    _defaults = {
        'date': fields.date.context_today,
        'cost_subtype_id': _get_default_service_type,
        'cost_type': 'fuel',
    }
class fleet_vehicle_log_services(osv.Model):
    """Service (maintenance/repair) log entries; each entry is also a vehicle cost."""
    def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
        """Default the odometer unit and purchaser from the selected vehicle."""
        if not vehicle_id:
            return {}
        vehicle = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context)
        odometer_unit = vehicle.odometer_unit
        driver = vehicle.driver_id.id
        return {
            'value': {
                'odometer_unit': odometer_unit,
                'purchaser_id': driver,
            }
        }
    def _get_default_service_type(self, cr, uid, context):
        """Default cost subtype: the 'type_service_service_8' reference, or False if absent."""
        try:
            model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_service_service_8')
        except ValueError:
            model_id = False
        return model_id
    _inherits = {'fleet.vehicle.cost': 'cost_id'}
    _name = 'fleet.vehicle.log.services'
    _description = 'Services for vehicles'
    _columns = {
        'purchaser_id': fields.many2one('res.partner', 'Purchaser', domain="['|',('customer','=',True),('employee','=',True)]"),
        'inv_ref': fields.char('Invoice Reference'),
        'vendor_id': fields.many2one('res.partner', 'Supplier', domain="[('supplier','=',True)]"),
        'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related with store=True because the graph view doesn't support (1) to address fields from inherited table and (2) fields that aren't stored in database
        'notes': fields.text('Notes'),
        'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
    }
    _defaults = {
        'date': fields.date.context_today,
        'cost_subtype_id': _get_default_service_type,
        'cost_type': 'services'
    }
class fleet_service_type(osv.Model):
    """Catalog of service types a vehicle can receive (contract and/or service)."""
    _name = 'fleet.service.type'
    _description = 'Type of services available on a vehicle'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        # Fixed typos in the user-facing help text ("wheter" -> "whether", "refer" -> "refers").
        'category': fields.selection([('contract', 'Contract'), ('service', 'Service'), ('both', 'Both')], 'Category', required=True, help='Choose whether the service refers to contracts, vehicle services or both'),
    }
class fleet_vehicle_log_contract(osv.Model):
    """Contract attached to a vehicle (leasing, insurance, ...); also a vehicle cost."""

    def scheduler_manage_auto_costs(self, cr, uid, context=None):
        """Cron task: generate the recurring costs of every non-closed contract.

        For each contract with a recurring cost and a frequency, one
        fleet.vehicle.cost is created per period (linked through
        ``contract_id`` and flagged ``auto_generated``), from the last
        auto-generated cost — or the contract start date — up to today,
        bounded by the contract expiration date.  Contracts without a start
        date or with frequency 'no' are skipped.
        """
        vehicle_cost_obj = self.pool.get('fleet.vehicle.cost')
        d = datetime.datetime.strptime(fields.date.context_today(self, cr, uid, context=context), tools.DEFAULT_SERVER_DATE_FORMAT).date()
        contract_ids = self.search(cr, uid, [('state', '!=', 'closed')], offset=0, limit=None, order=None, context=None, count=False)
        deltas = {'yearly': relativedelta(years=+1), 'monthly': relativedelta(months=+1), 'weekly': relativedelta(weeks=+1), 'daily': relativedelta(days=+1)}
        for contract in self.browse(cr, uid, contract_ids, context=context):
            if not contract.start_date or contract.cost_frequency == 'no':
                continue
            found = False
            last_cost_date = contract.start_date
            if contract.generated_cost_ids:
                # most recent auto-generated cost, if any
                last_autogenerated_cost_id = vehicle_cost_obj.search(cr, uid, ['&', ('contract_id', '=', contract.id), ('auto_generated', '=', True)], offset=0, limit=1, order='date desc', context=context, count=False)
                if last_autogenerated_cost_id:
                    found = True
                    last_cost_date = vehicle_cost_obj.browse(cr, uid, last_autogenerated_cost_id[0], context=context).date
            startdate = datetime.datetime.strptime(last_cost_date, tools.DEFAULT_SERVER_DATE_FORMAT).date()
            if found:
                # the last period is already covered, start at the next one
                startdate += deltas.get(contract.cost_frequency)
            # 'and' (not the bitwise '&') so the expiration date is only parsed
            # when the first condition holds
            while startdate <= d and startdate <= datetime.datetime.strptime(contract.expiration_date, tools.DEFAULT_SERVER_DATE_FORMAT).date():
                data = {
                    'amount': contract.cost_generated,
                    'date': startdate.strftime(tools.DEFAULT_SERVER_DATE_FORMAT),
                    'vehicle_id': contract.vehicle_id.id,
                    'cost_subtype_id': contract.cost_subtype_id.id,
                    'contract_id': contract.id,
                    'auto_generated': True
                }
                vehicle_cost_obj.create(cr, uid, data, context=context)
                startdate += deltas.get(contract.cost_frequency)
        return True

    def scheduler_manage_contract_expiration(self, cr, uid, context=None):
        """Cron task: flag contracts expiring within the next 15 days.

        Moves matching open contracts to 'toclose' and notifies each
        vehicle's chatter with the number of contracts to renew/close.
        """
        datetime_today = datetime.datetime.strptime(fields.date.context_today(self, cr, uid, context=context), tools.DEFAULT_SERVER_DATE_FORMAT)
        limit_date = (datetime_today + relativedelta(days=+15)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
        ids = self.search(cr, uid, ['&', ('state', '=', 'open'), ('expiration_date', '<', limit_date)], offset=0, limit=None, order=None, context=context, count=False)
        res = {}
        for contract in self.browse(cr, uid, ids, context=context):
            res[contract.vehicle_id.id] = res.get(contract.vehicle_id.id, 0) + 1
        for vehicle, value in res.items():
            self.pool.get('fleet.vehicle').message_post(cr, uid, vehicle, body=_('%s contract(s) need(s) to be renewed and/or closed!') % (str(value)), context=context)
        return self.write(cr, uid, ids, {'state': 'toclose'}, context=context)

    def run_scheduler(self, cr, uid, context=None):
        """Entry point of the daily cron: costs generation then expiration check."""
        self.scheduler_manage_auto_costs(cr, uid, context=context)
        self.scheduler_manage_contract_expiration(cr, uid, context=context)
        return True

    def _vehicle_contract_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        """Display name: '<vehicle> / <cost subtype> / <date>' (parts optional)."""
        res = {}
        for record in self.browse(cr, uid, ids, context=context):
            name = record.vehicle_id.name
            if record.cost_subtype_id.name:
                name += ' / ' + record.cost_subtype_id.name
            if record.date:
                name += ' / ' + record.date
            res[record.id] = name
        return res

    def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
        """Propagate the selected vehicle's odometer unit to the contract."""
        if not vehicle_id:
            return {}
        odometer_unit = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context).odometer_unit
        return {
            'value': {
                'odometer_unit': odometer_unit,
            }
        }

    def compute_next_year_date(self, strdate):
        """Return *strdate* + 365 days, formatted as a server date string.

        NOTE(review): a fixed 365-day delta drifts on leap years (unlike the
        relativedelta used elsewhere); kept as-is to preserve existing
        expiration-date behavior.
        """
        oneyear = datetime.timedelta(days=365)
        curdate = str_to_datetime(strdate)
        return datetime.datetime.strftime(curdate + oneyear, tools.DEFAULT_SERVER_DATE_FORMAT)

    def on_change_start_date(self, cr, uid, ids, strdate, enddate, context=None):
        """When the start date changes, default the expiration one year later."""
        if strdate:
            return {'value': {'expiration_date': self.compute_next_year_date(strdate)}}
        return {}

    def get_days_left(self, cr, uid, ids, prop, unknow_none, context=None):
        """return a dict with as value for each contract an integer
        if contract is in an open state and is overdue, return 0
        if contract is in a closed state, return -1
        otherwise return the number of days before the contract expires
        """
        res = {}
        for record in self.browse(cr, uid, ids, context=context):
            if record.expiration_date and record.state in ('open', 'toclose'):
                today = str_to_datetime(time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT))
                renew_date = str_to_datetime(record.expiration_date)
                diff_time = (renew_date - today).days
                res[record.id] = diff_time > 0 and diff_time or 0
            else:
                res[record.id] = -1
        return res

    def act_renew_contract(self, cr, uid, ids, context=None):
        """Duplicate the contract for the next period and open it in a form view.

        The renewal starts the day after the current expiration date and
        lasts as long as the original contract did.
        """
        assert len(ids) == 1, "This operation should only be done for 1 single contract at a time, as it is supposed to open a window as result"
        for element in self.browse(cr, uid, ids, context=context):
            # duration of the original contract, reused for the renewal
            startdate = str_to_datetime(element.start_date)
            enddate = str_to_datetime(element.expiration_date)
            diffdate = (enddate - startdate)
            default = {
                'date': fields.date.context_today(self, cr, uid, context=context),
                'start_date': datetime.datetime.strftime(str_to_datetime(element.expiration_date) + datetime.timedelta(days=1), tools.DEFAULT_SERVER_DATE_FORMAT),
                'expiration_date': datetime.datetime.strftime(enddate + diffdate, tools.DEFAULT_SERVER_DATE_FORMAT),
            }
            newid = super(fleet_vehicle_log_contract, self).copy(cr, uid, element.id, default, context=context)
        mod, modid = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'fleet_vehicle_log_contract_form')
        return {
            'name': _("Renew Contract"),
            'view_mode': 'form',
            'view_id': modid,
            'view_type': 'tree,form',
            'res_model': 'fleet.vehicle.log.contract',
            'type': 'ir.actions.act_window',
            'nodestroy': True,
            'domain': '[]',
            'res_id': newid,
            'context': {'active_id': newid},
        }

    def _get_default_contract_type(self, cr, uid, context=None):
        """Default cost subtype: the 'Leasing' contract type, or False if absent."""
        try:
            model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_contract_leasing')
        except ValueError:
            model_id = False
        return model_id

    def on_change_indic_cost(self, cr, uid, ids, cost_ids, context=None):
        """Recompute the indicative cost total from the one2many commands."""
        totalsum = 0.0
        for element in cost_ids:
            # only (0, 0, values) / (1, id, values) commands carry a values dict
            if element and len(element) == 3 and isinstance(element[2], dict):
                totalsum += element[2].get('amount', 0.0)
        return {
            'value': {
                'sum_cost': totalsum,
            }
        }

    def _get_sum_cost(self, cr, uid, ids, field_name, arg, context=None):
        """Function field: sum of the contract's indicative costs."""
        res = {}
        for contract in self.browse(cr, uid, ids, context=context):
            res[contract.id] = sum(cost.amount for cost in contract.cost_ids)
        return res

    _inherits = {'fleet.vehicle.cost': 'cost_id'}
    _name = 'fleet.vehicle.log.contract'
    _description = 'Contract information on a vehicle'
    _order = 'state desc,expiration_date'
    _columns = {
        'name': fields.function(_vehicle_contract_name_get_fnc, type="text", string='Name', store=True),
        'start_date': fields.date('Contract Start Date', help='Date when the coverage of the contract begins'),
        'expiration_date': fields.date('Contract Expiration Date', help='Date when the coverage of the contract expirates (by default, one year after begin date)'),
        'days_left': fields.function(get_days_left, type='integer', string='Warning Date'),
        'insurer_id': fields.many2one('res.partner', 'Supplier'),
        'purchaser_id': fields.many2one('res.partner', 'Contractor', help='Person to which the contract is signed for'),
        'ins_ref': fields.char('Contract Reference', size=64, copy=False),
        'state': fields.selection([('open', 'In Progress'), ('toclose', 'To Close'), ('closed', 'Terminated')],
                                  'Status', readonly=True, help='Choose wheter the contract is still valid or not',
                                  copy=False),
        'notes': fields.text('Terms and Conditions', help='Write here all supplementary informations relative to this contract', copy=False),
        'cost_generated': fields.float('Recurring Cost Amount', help="Costs paid at regular intervals, depending on the cost frequency. If the cost frequency is set to unique, the cost will be logged at the start date"),
        'cost_frequency': fields.selection([('no', 'No'), ('daily', 'Daily'), ('weekly', 'Weekly'), ('monthly', 'Monthly'), ('yearly', 'Yearly')], 'Recurring Cost Frequency', help='Frequency of the recuring cost', required=True),
        'generated_cost_ids': fields.one2many('fleet.vehicle.cost', 'contract_id', 'Generated Costs'),
        'sum_cost': fields.function(_get_sum_cost, type='float', string='Indicative Costs Total'),
        'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
        'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related with store=True because the graph view doesn't support (1) to address fields from inherited table and (2) fields that aren't stored in database
    }
    _defaults = {
        'purchaser_id': lambda self, cr, uid, ctx: self.pool.get('res.users').browse(cr, uid, uid, context=ctx).partner_id.id or False,
        'date': fields.date.context_today,
        'start_date': fields.date.context_today,
        'state': 'open',
        'expiration_date': lambda self, cr, uid, ctx: self.compute_next_year_date(fields.date.context_today(self, cr, uid, context=ctx)),
        'cost_frequency': 'no',
        'cost_subtype_id': _get_default_contract_type,
        'cost_type': 'contract',
    }

    def contract_close(self, cr, uid, ids, context=None):
        """Mark the contracts as terminated."""
        return self.write(cr, uid, ids, {'state': 'closed'}, context=context)

    def contract_open(self, cr, uid, ids, context=None):
        """(Re)open the contracts."""
        return self.write(cr, uid, ids, {'state': 'open'}, context=context)
class fleet_contract_state(osv.Model):
    """Configurable list of statuses a leasing contract can be in."""
    _name = 'fleet.contract.state'
    _description = 'Contains the different possible status of a leasing contract'
    _columns = {
        'name':fields.char('Contract Status', required=True),
    }
|
mancoast/CPythonPyc_test | refs/heads/master | cpython/272_test_codecencodings_jp.py | 150 | #!/usr/bin/env python
#
# test_codecencodings_jp.py
# Codec encoding tests for Japanese encodings.
#
from test import test_support
from test import test_multibytecodec_support
import unittest
class Test_CP932(test_multibytecodec_support.TestBase, unittest.TestCase):
    # Error handling and cp932-specific mappings for the Windows Shift JIS codec.
    encoding = 'cp932'
    tstring = test_multibytecodec_support.load_teststring('shift_jis')
    # Each case: (raw bytes, error handler, expected unicode or None for an exception).
    codectests = (
        # invalid bytes
        ("abc\x81\x00\x81\x00\x82\x84", "strict", None),
        ("abc\xf8", "strict", None),
        ("abc\x81\x00\x82\x84", "replace", u"abc\ufffd\uff44"),
        ("abc\x81\x00\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
        ("abc\x81\x00\x82\x84", "ignore", u"abc\uff44"),
        # sjis vs cp932
        ("\\\x7e", "replace", u"\\\x7e"),
        ("\x81\x5f\x81\x61\x81\x7c", "replace", u"\uff3c\u2225\uff0d"),
    )
class Test_EUC_JISX0213(test_multibytecodec_support.TestBase,
                        unittest.TestCase):
    # Error handling and JIS X 0213-specific mappings for the euc_jisx0213 codec.
    encoding = 'euc_jisx0213'
    tstring = test_multibytecodec_support.load_teststring('euc_jisx0213')
    # Each case: (raw bytes, error handler, expected unicode or None for an exception).
    codectests = (
        # invalid bytes
        ("abc\x80\x80\xc1\xc4", "strict", None),
        ("abc\xc8", "strict", None),
        ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u7956"),
        ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u7956\ufffd"),
        ("abc\x80\x80\xc1\xc4", "ignore", u"abc\u7956"),
        ("abc\x8f\x83\x83", "replace", u"abc\ufffd"),
        ("\xc1\x64", "strict", None),
        ("\xa1\xc0", "strict", u"\uff3c"),
    )
    # Round-trip check for the xmlcharrefreplace error handler.
    xmlcharnametest = (
        u"\xab\u211c\xbb = \u2329\u1234\u232a",
        "\xa9\xa8&real;\xa9\xb2 = &lang;&#4660;&rang;"
    )
# Error-handling cases shared by the EUC-JP family of codecs.
# Each case: (raw bytes, error handler, expected unicode or None for an exception).
eucjp_commontests = (
    ("abc\x80\x80\xc1\xc4", "strict", None),
    ("abc\xc8", "strict", None),
    ("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u7956"),
    ("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u7956\ufffd"),
    ("abc\x80\x80\xc1\xc4", "ignore", u"abc\u7956"),
    ("abc\x8f\x83\x83", "replace", u"abc\ufffd"),
    ("\xc1\x64", "strict", None),
)
class Test_EUC_JP_COMPAT(test_multibytecodec_support.TestBase,
                         unittest.TestCase):
    # euc_jp codec: shared EUC-JP cases plus backslash/yen and tilde/overline
    # compatibility mappings.
    encoding = 'euc_jp'
    tstring = test_multibytecodec_support.load_teststring('euc_jp')
    codectests = eucjp_commontests + (
        ("\xa1\xc0\\", "strict", u"\uff3c\\"),
        (u"\xa5", "strict", "\x5c"),
        (u"\u203e", "strict", "\x7e"),
    )
# Error-handling cases shared by the Shift JIS family of codecs.
# Each case: (raw bytes, error handler, expected unicode or None for an exception).
shiftjis_commonenctests = (
    ("abc\x80\x80\x82\x84", "strict", None),
    ("abc\xf8", "strict", None),
    ("abc\x80\x80\x82\x84", "replace", u"abc\ufffd\uff44"),
    ("abc\x80\x80\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
    ("abc\x80\x80\x82\x84def", "ignore", u"abc\uff44def"),
)
class Test_SJIS_COMPAT(test_multibytecodec_support.TestBase, unittest.TestCase):
    # shift_jis codec: shared Shift JIS cases plus strict mappings for the
    # characters where shift_jis differs from cp932.
    encoding = 'shift_jis'
    tstring = test_multibytecodec_support.load_teststring('shift_jis')
    codectests = shiftjis_commonenctests + (
        ("\\\x7e", "strict", u"\\\x7e"),
        ("\x81\x5f\x81\x61\x81\x7c", "strict", u"\uff3c\u2016\u2212"),
    )
class Test_SJISX0213(test_multibytecodec_support.TestBase, unittest.TestCase):
    # shift_jisx0213 codec: error handling plus the JIS X 0213 mappings that
    # differ from plain Shift JIS / cp932.
    encoding = 'shift_jisx0213'
    tstring = test_multibytecodec_support.load_teststring('shift_jisx0213')
    # Each case: (raw bytes, error handler, expected unicode or None for an exception).
    codectests = (
        # invalid bytes
        ("abc\x80\x80\x82\x84", "strict", None),
        ("abc\xf8", "strict", None),
        ("abc\x80\x80\x82\x84", "replace", u"abc\ufffd\uff44"),
        ("abc\x80\x80\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
        ("abc\x80\x80\x82\x84def", "ignore", u"abc\uff44def"),
        # sjis vs cp932
        ("\\\x7e", "replace", u"\xa5\u203e"),
        ("\x81\x5f\x81\x61\x81\x7c", "replace", u"\x5c\u2016\u2212"),
    )
    # Round-trip check for the xmlcharrefreplace error handler.
    xmlcharnametest = (
        u"\xab\u211c\xbb = \u2329\u1234\u232a",
        "\x85G&real;\x85Q = &lang;&#4660;&rang;"
    )
def test_main():
    """Run every TestCase defined in this module through regrtest's helper."""
    test_support.run_unittest(__name__)

if __name__ == "__main__":
    test_main()
|
pekeler/arangodb | refs/heads/devel | 3rdParty/V8-4.3.61/testing/gmock/scripts/generator/cpp/__init__.py | 12133432 | |
havard024/prego | refs/heads/master | venv/lib/python2.7/site-packages/django/contrib/localflavor/de/__init__.py | 12133432 | |
xunil154/ExampleREST | refs/heads/master | example/notes/migrations/__init__.py | 12133432 | |
dex4er/django | refs/heads/1.6.x | tests/admin_scripts/management/commands/__init__.py | 12133432 | |
bkaradzic/SwiftShader | refs/heads/master | third_party/llvm-7.0/llvm/utils/lit/tests/shtest-output-printing.py | 5 | # Check the various features of the ShTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-output-printing > %t.out
# RUN: FileCheck --input-file %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: FAIL: shtest-output-printing :: basic.txt
# CHECK-NEXT: *** TEST 'shtest-output-printing :: basic.txt' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK: --
# CHECK-NEXT: Exit Code: 1
#
# CHECK: Command Output
# CHECK-NEXT: --
# CHECK-NEXT: $ ":" "RUN: at line 1"
# CHECK-NEXT: $ "true"
# CHECK-NEXT: $ ":" "RUN: at line 2"
# CHECK-NEXT: $ "echo" "hi"
# CHECK-NEXT: # command output:
# CHECK-NEXT: hi
#
# CHECK: $ ":" "RUN: at line 3"
# CHECK-NEXT: $ "wc" "missing-file"
# CHECK-NEXT: # redirected output from '{{.*(/|\\\\)}}basic.txt.tmp.out':
# CHECK-NEXT: missing-file{{.*}} No such file or directory
# CHECK: note: command had no output on stdout or stderr
# CHECK-NEXT: error: command failed with exit status: 1
|
rahuldhote/odoo | refs/heads/8.0 | addons/payment_adyen/models/adyen.py | 165 | # -*- coding: utf-'8' "-*-"
import base64
try:
import simplejson as json
except ImportError:
import json
from hashlib import sha1
import hmac
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_adyen.controllers.main import AdyenController
from openerp.osv import osv, fields
from openerp.tools import float_round
_logger = logging.getLogger(__name__)
class AcquirerAdyen(osv.Model):
    """Payment acquirer implementation for the Adyen Hosted Payment Pages
    (HPP) integration: registers the 'adyen' provider, stores the merchant
    credentials, and builds/signs the redirect form values."""
    _inherit = 'payment.acquirer'
    def _get_adyen_urls(self, cr, uid, environment, context=None):
        """ Adyen URLs
        - yhpp: hosted payment page: pay.shtml for single, select.shtml for multiple
        """
        # 'prod' is mapped to Adyen's 'live' subdomain; any other environment
        # name (e.g. 'test') is used verbatim as the subdomain.
        return {
            'adyen_form_url': 'https://%s.adyen.com/hpp/pay.shtml' % ('live' if environment == 'prod' else environment),
        }
    def _get_providers(self, cr, uid, context=None):
        """Register 'adyen' in the list of selectable payment providers."""
        providers = super(AcquirerAdyen, self)._get_providers(cr, uid, context=context)
        providers.append(['adyen', 'Adyen'])
        return providers
    # Adyen-specific credentials; only required when provider == 'adyen'.
    _columns = {
        'adyen_merchant_account': fields.char('Merchant Account', required_if_provider='adyen'),
        'adyen_skin_code': fields.char('Skin Code', required_if_provider='adyen'),
        'adyen_skin_hmac_key': fields.char('Skin HMAC Key', required_if_provider='adyen'),
    }
    def _adyen_generate_merchant_sig(self, acquirer, inout, values):
        """ Generate the merchantSig (HMAC-SHA1 over a fixed, ordered field
        list) for incoming or outgoing communications.
        :param browse acquirer: the payment.acquirer browse record; it must
                                carry the skin HMAC key used for signing
        :param string inout: 'in' (openerp contacting adyen) or 'out' (adyen
                             contacting openerp). In this last case only some
                             fields should be contained (see e-Commerce basic)
        :param dict values: transaction values
        :return string: base64-encoded HMAC-SHA1 signature
        """
        assert inout in ('in', 'out')
        assert acquirer.provider == 'adyen'
        # The concatenation order below is mandated by Adyen's HPP signing
        # scheme and must not be changed.
        if inout == 'in':
            keys = "paymentAmount currencyCode shipBeforeDate merchantReference skinCode merchantAccount sessionValidity shopperEmail shopperReference recurringContract allowedMethods blockedMethods shopperStatement merchantReturnData billingAddressType deliveryAddressType offset".split()
        else:
            keys = "authResult pspReference merchantReference skinCode merchantReturnData".split()
        def get_value(key):
            # Missing or falsy fields contribute an empty string.
            if values.get(key):
                return values[key]
            return ''
        sign = ''.join('%s' % get_value(k) for k in keys).encode('ascii')
        key = acquirer.adyen_skin_hmac_key.encode('ascii')
        return base64.b64encode(hmac.new(key, sign, sha1).digest())
    def adyen_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
        """Build the key/value pairs posted to Adyen's HPP form, including
        the merchantSig, and return (partner_values, adyen_tx_values)."""
        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        acquirer = self.browse(cr, uid, id, context=context)
        # tmp: session/shipping validity is hard-coded to tomorrow.
        import datetime
        from dateutil import relativedelta
        tmp_date = datetime.date.today() + relativedelta.relativedelta(days=1)
        adyen_tx_values = dict(tx_values)
        adyen_tx_values.update({
            'merchantReference': tx_values['reference'],
            # Adyen expects the amount in minor units (cents).
            'paymentAmount': '%d' % int(float_round(tx_values['amount'], 2) * 100),
            'currencyCode': tx_values['currency'] and tx_values['currency'].name or '',
            'shipBeforeDate': tmp_date,
            'skinCode': acquirer.adyen_skin_code,
            'merchantAccount': acquirer.adyen_merchant_account,
            'shopperLocale': partner_values['lang'],
            'sessionValidity': tmp_date,
            'resURL': '%s' % urlparse.urljoin(base_url, AdyenController._return_url),
        })
        # The return URL is round-tripped through Adyen inside
        # merchantReturnData (it is part of the signed payload).
        if adyen_tx_values.get('return_url'):
            adyen_tx_values['merchantReturnData'] = json.dumps({'return_url': '%s' % adyen_tx_values.pop('return_url')})
        adyen_tx_values['merchantSig'] = self._adyen_generate_merchant_sig(acquirer, 'in', adyen_tx_values)
        return partner_values, adyen_tx_values
    def adyen_get_form_action_url(self, cr, uid, id, context=None):
        """Return the HPP URL the payment form must be submitted to."""
        acquirer = self.browse(cr, uid, id, context=context)
        return self._get_adyen_urls(cr, uid, acquirer.environment, context=context)['adyen_form_url']
class TxAdyen(osv.Model):
    """payment.transaction extension handling Adyen form feedback:
    transaction lookup, signature verification, parameter validation and
    state transitions based on Adyen's authResult."""
    _inherit = 'payment.transaction'
    _columns = {
        'adyen_psp_reference': fields.char('Adyen PSP Reference'),
    }
    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------
    def _adyen_form_get_tx_from_data(self, cr, uid, data, context=None):
        """Locate the unique transaction matching the Adyen feedback `data`
        and verify its merchantSig; raises ValidationError on any mismatch."""
        reference, pspReference = data.get('merchantReference'), data.get('pspReference')
        if not reference or not pspReference:
            error_msg = 'Adyen: received data with missing reference (%s) or missing pspReference (%s)' % (reference, pspReference)
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        # find tx -> @TDENOTE use pspReference ?
        tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
        # Exactly one transaction must match; zero or several is an error.
        if not tx_ids or len(tx_ids) > 1:
            error_msg = 'Adyen: received data for reference %s' % (reference)
            if not tx_ids:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
        # verify shasign: recompute the 'out' signature and compare with the
        # merchantSig Adyen sent, to authenticate the feedback.
        shasign_check = self.pool['payment.acquirer']._adyen_generate_merchant_sig(tx.acquirer_id, 'out', data)
        if shasign_check != data.get('merchantSig'):
            error_msg = 'Adyen: invalid merchantSig, received %s, computed %s' % (data.get('merchantSig'), shasign_check)
            _logger.warning(error_msg)
            raise ValidationError(error_msg)
        return tx
    def _adyen_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
        """Return a list of (name, received, expected) tuples for feedback
        fields that do not match the stored transaction."""
        invalid_parameters = []
        # reference at acquirer: pspReference
        if tx.acquirer_reference and data.get('pspReference') != tx.acquirer_reference:
            invalid_parameters.append(('pspReference', data.get('pspReference'), tx.acquirer_reference))
        # seller
        if data.get('skinCode') != tx.acquirer_id.adyen_skin_code:
            invalid_parameters.append(('skinCode', data.get('skinCode'), tx.acquirer_id.adyen_skin_code))
        # result
        if not data.get('authResult'):
            invalid_parameters.append(('authResult', data.get('authResult'), 'something'))
        return invalid_parameters
    def _adyen_form_validate(self, cr, uid, tx, data, context=None):
        """Map Adyen's authResult onto the transaction state:
        AUTHORISED -> done, PENDING -> pending, anything else -> error.
        Returns True on done/pending, False on error."""
        status = data.get('authResult', 'PENDING')
        if status == 'AUTHORISED':
            tx.write({
                'state': 'done',
                'adyen_psp_reference': data.get('pspReference'),
                # 'date_validate': data.get('payment_date', fields.datetime.now()),
                # 'paypal_txn_type': data.get('express_checkout')
            })
            return True
        elif status == 'PENDING':
            tx.write({
                'state': 'pending',
                'adyen_psp_reference': data.get('pspReference'),
            })
            return True
        else:
            error = 'Adyen: feedback error'
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error
            })
            return False
|
3dfxmadscientist/CBSS | refs/heads/master | addons/stock/wizard/__init__.py | 73 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_traceability
import stock_move
import stock_splitinto
import stock_partial_picking
import stock_partial_move
import stock_inventory_merge
import stock_fill_inventory
import stock_inventory_line_split
import stock_invoice_onshipping
import stock_location_product
import stock_change_standard_price
import stock_return_picking
import stock_change_product_qty
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
damonkohler/sl4a | refs/heads/master | python/src/Lib/CGIHTTPServer.py | 59 | """CGI-savvy HTTP Server.
This module builds on SimpleHTTPServer by implementing GET and POST
requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
os.popen2() is used as a fallback, with slightly altered semantics; if
that function is not present either (e.g. on Macintosh), only Python
scripts are supported, and they are executed by the current process.
In all cases, the implementation is intentionally naive -- all
requests are executed sychronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
"""
__version__ = "0.4"
__all__ = ["CGIHTTPRequestHandler"]
import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
import select
class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Complete HTTP server with GET, HEAD and POST commands.
    GET and HEAD also support running CGI scripts.
    The POST command is *only* implemented for CGI scripts.
    """
    # Determine platform specifics: pick the best available mechanism for
    # launching CGI scripts (fork on Unix, popen2/3 on Windows, in-process
    # execution as a last resort).
    have_fork = hasattr(os, 'fork')
    have_popen2 = hasattr(os, 'popen2')
    have_popen3 = hasattr(os, 'popen3')
    # Make rfile unbuffered -- we need to read one line and then pass
    # the rest to a subprocess, so we can't use buffered input.
    rbufsize = 0
    def do_POST(self):
        """Serve a POST request.
        This is only implemented for CGI scripts.
        """
        if self.is_cgi():
            self.run_cgi()
        else:
            self.send_error(501, "Can only POST to CGI scripts")
    def send_head(self):
        """Version of send_head that supports CGI scripts.

        Dispatches to run_cgi() for CGI paths; otherwise falls back to
        plain file serving from the base class.
        """
        if self.is_cgi():
            return self.run_cgi()
        else:
            return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
    def is_cgi(self):
        """Test whether self.path corresponds to a CGI script,
        and return a boolean.
        This function sets self.cgi_info to a tuple (dir, rest)
        when it returns True, where dir is the directory part before
        the CGI script name. Note that rest begins with a
        slash if it is not empty.
        The default implementation tests whether the path
        begins with one of the strings in the list
        self.cgi_directories (and the next character is a '/'
        or the end of the string).
        """
        path = self.path
        for x in self.cgi_directories:
            i = len(x)
            if path[:i] == x and (not path[i:] or path[i] == '/'):
                self.cgi_info = path[:i], path[i+1:]
                return True
        return False
    # URL prefixes treated as CGI script directories.
    cgi_directories = ['/cgi-bin', '/htbin']
    def is_executable(self, path):
        """Test whether argument path is an executable file."""
        return executable(path)
    def is_python(self, path):
        """Test whether argument path is a Python script."""
        head, tail = os.path.splitext(path)
        return tail.lower() in (".py", ".pyw")
    def run_cgi(self):
        """Execute a CGI script."""
        path = self.path
        dir, rest = self.cgi_info
        # Walk forward through 'rest', folding existing subdirectories into
        # 'dir' so nested CGI directories are resolved correctly.
        i = path.find('/', len(dir) + 1)
        while i >= 0:
            nextdir = path[:i]
            nextrest = path[i+1:]
            scriptdir = self.translate_path(nextdir)
            if os.path.isdir(scriptdir):
                dir, rest = nextdir, nextrest
                i = path.find('/', len(dir) + 1)
            else:
                break
        # find an explicit query string, if present.
        i = rest.rfind('?')
        if i >= 0:
            rest, query = rest[:i], rest[i+1:]
        else:
            query = ''
        # dissect the part after the directory name into a script name &
        # a possible additional path, to be stored in PATH_INFO.
        i = rest.find('/')
        if i >= 0:
            script, rest = rest[:i], rest[i:]
        else:
            script, rest = rest, ''
        scriptname = dir + '/' + script
        scriptfile = self.translate_path(scriptname)
        # Validate the target: it must exist, be a regular file, and (for
        # non-Python scripts on exec-capable platforms) be executable.
        if not os.path.exists(scriptfile):
            self.send_error(404, "No such CGI script (%r)" % scriptname)
            return
        if not os.path.isfile(scriptfile):
            self.send_error(403, "CGI script is not a plain file (%r)" %
                            scriptname)
            return
        ispy = self.is_python(scriptname)
        if not ispy:
            if not (self.have_fork or self.have_popen2 or self.have_popen3):
                self.send_error(403, "CGI script is not a Python script (%r)" %
                                scriptname)
                return
            if not self.is_executable(scriptfile):
                self.send_error(403, "CGI script is not executable (%r)" %
                                scriptname)
                return
        # Build the CGI environment (CGI/1.1 meta-variables).
        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
        # XXX Much of the following could be prepared ahead of time!
        env = {}
        env['SERVER_SOFTWARE'] = self.version_string()
        env['SERVER_NAME'] = self.server.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PROTOCOL'] = self.protocol_version
        env['SERVER_PORT'] = str(self.server.server_port)
        env['REQUEST_METHOD'] = self.command
        uqrest = urllib.unquote(rest)
        env['PATH_INFO'] = uqrest
        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
        env['SCRIPT_NAME'] = scriptname
        if query:
            env['QUERY_STRING'] = query
        host = self.address_string()
        if host != self.client_address[0]:
            env['REMOTE_HOST'] = host
        env['REMOTE_ADDR'] = self.client_address[0]
        # Decode HTTP Basic credentials into AUTH_TYPE / REMOTE_USER.
        authorization = self.headers.getheader("authorization")
        if authorization:
            authorization = authorization.split()
            if len(authorization) == 2:
                import base64, binascii
                env['AUTH_TYPE'] = authorization[0]
                if authorization[0].lower() == "basic":
                    try:
                        authorization = base64.decodestring(authorization[1])
                    except binascii.Error:
                        pass
                    else:
                        authorization = authorization.split(':')
                        if len(authorization) == 2:
                            env['REMOTE_USER'] = authorization[0]
        # XXX REMOTE_IDENT
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        referer = self.headers.getheader('referer')
        if referer:
            env['HTTP_REFERER'] = referer
        accept = []
        # Accept headers may be continued across lines; continuation lines
        # start with whitespace.
        for line in self.headers.getallmatchingheaders('accept'):
            if line[:1] in "\t\n\r ":
                accept.append(line.strip())
            else:
                accept = accept + line[7:].split(',')
        env['HTTP_ACCEPT'] = ','.join(accept)
        ua = self.headers.getheader('user-agent')
        if ua:
            env['HTTP_USER_AGENT'] = ua
        co = filter(None, self.headers.getheaders('cookie'))
        if co:
            env['HTTP_COOKIE'] = ', '.join(co)
        # XXX Other HTTP_* headers
        # Since we're setting the env in the parent, provide empty
        # values to override previously set values
        for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
                  'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
            env.setdefault(k, "")
        os.environ.update(env)
        # NOTE: status 200 is committed before the script runs, so scripts
        # cannot emit their own status codes (e.g. redirects).
        self.send_response(200, "Script output follows")
        decoded_query = query.replace('+', ' ')
        if self.have_fork:
            # Unix -- fork as we should
            args = [script]
            if '=' not in decoded_query:
                args.append(decoded_query)
            nobody = nobody_uid()
            self.wfile.flush() # Always flush before forking
            pid = os.fork()
            if pid != 0:
                # Parent: wait for the child and drain any request bytes the
                # script did not consume.
                pid, sts = os.waitpid(pid, 0)
                # throw away additional data [see bug #427345]
                while select.select([self.rfile], [], [], 0)[0]:
                    if not self.rfile.read(1):
                        break
                if sts:
                    self.log_error("CGI script exit status %#x", sts)
                return
            # Child: drop privileges (best effort), wire the socket to
            # stdin/stdout and exec the script.
            try:
                try:
                    os.setuid(nobody)
                except os.error:
                    pass
                os.dup2(self.rfile.fileno(), 0)
                os.dup2(self.wfile.fileno(), 1)
                os.execve(scriptfile, args, os.environ)
            except:
                self.server.handle_error(self.request, self.client_address)
                # _exit avoids running the parent's cleanup in the child.
                os._exit(127)
        elif self.have_popen2 or self.have_popen3:
            # Windows -- use popen2 or popen3 to create a subprocess
            import shutil
            if self.have_popen3:
                popenx = os.popen3
            else:
                popenx = os.popen2
            cmdline = scriptfile
            if self.is_python(scriptfile):
                interp = sys.executable
                if interp.lower().endswith("w.exe"):
                    # On Windows, use python.exe, not pythonw.exe
                    interp = interp[:-5] + interp[-4:]
                cmdline = "%s -u %s" % (interp, cmdline)
            if '=' not in query and '"' not in query:
                cmdline = '%s "%s"' % (cmdline, query)
            self.log_message("command: %s", cmdline)
            try:
                nbytes = int(length)
            except (TypeError, ValueError):
                nbytes = 0
            files = popenx(cmdline, 'b')
            fi = files[0]
            fo = files[1]
            if self.have_popen3:
                fe = files[2]
            # Forward the request body to the script, then drain leftovers.
            if self.command.lower() == "post" and nbytes > 0:
                data = self.rfile.read(nbytes)
                fi.write(data)
            # throw away additional data [see bug #427345]
            while select.select([self.rfile._sock], [], [], 0)[0]:
                if not self.rfile._sock.recv(1):
                    break
            fi.close()
            shutil.copyfileobj(fo, self.wfile)
            if self.have_popen3:
                errors = fe.read()
                fe.close()
                if errors:
                    self.log_error('%s', errors)
            sts = fo.close()
            if sts:
                self.log_error("CGI script exit status %#x", sts)
            else:
                self.log_message("CGI script exited OK")
        else:
            # Other O.S. -- execute script in this process, swapping out the
            # interpreter's std streams and argv for the duration.
            save_argv = sys.argv
            save_stdin = sys.stdin
            save_stdout = sys.stdout
            save_stderr = sys.stderr
            try:
                save_cwd = os.getcwd()
                try:
                    sys.argv = [scriptfile]
                    if '=' not in decoded_query:
                        sys.argv.append(decoded_query)
                    sys.stdout = self.wfile
                    sys.stdin = self.rfile
                    execfile(scriptfile, {"__name__": "__main__"})
                finally:
                    sys.argv = save_argv
                    sys.stdin = save_stdin
                    sys.stdout = save_stdout
                    sys.stderr = save_stderr
                    os.chdir(save_cwd)
            except SystemExit, sts:
                self.log_error("CGI script exit status %s", str(sts))
            else:
                self.log_message("CGI script exited OK")
# Cached uid of the 'nobody' account; resolved lazily by nobody_uid().
nobody = None
def nobody_uid():
    """Return (and cache) the uid to run CGI scripts under.

    Prefers the 'nobody' account; when absent, falls back to one past the
    highest uid on the system. Returns -1 if the pwd module is unavailable.
    """
    global nobody
    if not nobody:
        try:
            import pwd
        except ImportError:
            return -1
        try:
            nobody = pwd.getpwnam('nobody')[2]
        except KeyError:
            highest = max(entry[2] for entry in pwd.getpwall())
            nobody = highest + 1
    return nobody
def executable(path):
    """Test for executable file.

    Returns False for paths that do not exist or cannot be stat'ed;
    otherwise True when any execute bit (user, group or other) is set.
    """
    try:
        st = os.stat(path)
    except os.error:
        return False
    # 0o111 masks the three execute-permission bits. The explicit 0o prefix
    # (valid since Python 2.6) replaces the legacy literal 0111, which is
    # both easy to misread and a syntax error under Python 3.
    return st.st_mode & 0o111 != 0
def test(HandlerClass = CGIHTTPRequestHandler,
         ServerClass = BaseHTTPServer.HTTPServer):
    """Command-line entry point: run a demo CGI-capable HTTP server.

    Delegates to SimpleHTTPServer.test, which parses sys.argv for the
    port and serves the current directory until interrupted.
    """
    SimpleHTTPServer.test(HandlerClass, ServerClass)
# Allow running this module directly as a standalone demo CGI server.
if __name__ == '__main__':
    test()
|
bryceguo/robotframework-selenium2library | refs/heads/master | src/Selenium2Library/locators/tableelementfinder.py | 33 | from selenium.common.exceptions import NoSuchElementException
from Selenium2Library import utils
from elementfinder import ElementFinder
class TableElementFinder(object):
    """Locates elements inside HTML tables (content, headers, footers,
    rows, columns) by expanding a table locator with part-specific
    suffixes and delegating the actual lookup to an ElementFinder."""

    def __init__(self, element_finder=None):
        """Create a finder; builds a default ElementFinder when none is given."""
        if not element_finder:
            element_finder = ElementFinder()
        self._element_finder = element_finder
        # Maps (locator strategy, table part) to the suffix templates that
        # narrow a table locator down to that part. '%s' slots take a
        # 1-based row/column index.
        self._locator_suffixes = {
            ('css', 'default'): [''],
            ('css', 'content'): [''],
            ('css', 'header'): [' th'],
            ('css', 'footer'): [' tfoot td'],
            ('css', 'row'): [' tr:nth-child(%s)'],
            ('css', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],
            ('xpath', 'default'): [''],
            ('xpath', 'content'): ['//*'],
            ('xpath', 'header'): ['//th'],
            ('xpath', 'footer'): ['//tfoot//td'],
            ('xpath', 'row'): ['//tr[%s]//*'],
            ('xpath', 'col'): ['//tr//*[self::td or self::th][%s]']
        }

    def find(self, browser, table_locator):
        """Return the table element itself, or None when not found."""
        candidates = self._parse_table_locator(table_locator, 'default')
        return self._search_in_locators(browser, candidates, None)

    def find_by_content(self, browser, table_locator, content):
        """Return the first element anywhere in the table containing `content`."""
        candidates = self._parse_table_locator(table_locator, 'content')
        return self._search_in_locators(browser, candidates, content)

    def find_by_header(self, browser, table_locator, content):
        """Return the first header cell containing `content`."""
        candidates = self._parse_table_locator(table_locator, 'header')
        return self._search_in_locators(browser, candidates, content)

    def find_by_footer(self, browser, table_locator, content):
        """Return the first footer cell containing `content`."""
        candidates = self._parse_table_locator(table_locator, 'footer')
        return self._search_in_locators(browser, candidates, content)

    def find_by_row(self, browser, table_locator, col, content):
        """Return the first element in row `col` containing `content`."""
        templates = self._parse_table_locator(table_locator, 'row')
        expanded = [template % str(col) for template in templates]
        return self._search_in_locators(browser, expanded, content)

    def find_by_col(self, browser, table_locator, col, content):
        """Return the first element in column `col` containing `content`."""
        templates = self._parse_table_locator(table_locator, 'col')
        expanded = [template % str(col) for template in templates]
        return self._search_in_locators(browser, expanded, content)

    def _parse_table_locator(self, table_locator, location_method):
        """Expand a table locator into concrete locators for the wanted part.

        Bare locators (no 'css='/'xpath=' prefix) are treated as a table id.
        """
        if table_locator.startswith('xpath='):
            strategy = 'xpath'
        else:
            strategy = 'css'
            if not table_locator.startswith('css='):
                table_locator = "css=table#%s" % table_locator
        suffixes = self._locator_suffixes[(strategy, location_method)]
        return [table_locator + suffix for suffix in suffixes]

    def _search_in_locators(self, browser, locators, content):
        """Try each locator in order; return the first element whose text
        contains `content` (or the very first match when content is None)."""
        for candidate_locator in locators:
            for element in self._element_finder.find(browser, candidate_locator):
                if content is None:
                    return element
                text = element.text
                if text and content in text:
                    return element
        return None
|
eunchong/build | refs/heads/master | scripts/master/unittests/annotator_test.py | 1 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Source file for annotated command testcases."""
import os
import time
import unittest
import test_env # pylint: disable=W0611,W0403
from buildbot.status import builder
import mock
from twisted.internet import defer
from master import chromium_step
# Mocks confuse pylint.
# pylint: disable=E1101
# pylint: disable=R0201
class FakeBuild(mock.Mock):
    """Mock buildbot build that records every property set on it."""

    def __init__(self, command):
        mock.Mock.__init__(self)
        self.command = command
        # Maps property name -> (value, source, runtime) as recorded.
        self.properties = {}

    def setProperty(self, propname, propval, source, runtime=True):
        """Record a build property without any buildbot machinery."""
        record = (propval, source, runtime)
        self.properties[propname] = record
class FakeCommand(mock.Mock):
    """Mock remote command; starts out successful with no status attached."""

    def __init__(self):
        mock.Mock.__init__(self)
        # Default to a successful buildbot result code; tests override as needed.
        self.rc = builder.SUCCESS
        self.status = None

    def addLog(self, name):
        """Delegate log creation to the attached step status object."""
        return self.status.addLog(name)
class FakeLog(object):
    """In-memory stand-in for a buildbot log file.

    Accumulates stdout text; stderr and header writes are accepted but
    discarded. After finish(), any further write trips an assertion.
    """

    def __init__(self, name):
        self.name = name
        self.text = ''
        self.finished = False
        self.chunkSize = 1024

    def getName(self):
        return self.name

    def addStdout(self, data):
        assert not self.finished
        self.text = self.text + data

    def addStderr(self, data):
        # Dropped on purpose; only presence is checked by tests.
        assert not self.finished

    def addHeader(self, msg):
        # Dropped on purpose; only presence is checked by tests.
        assert not self.finished

    def finish(self):
        self.finished = True
class FakeBuildstepStatus(mock.Mock):
    """Mock step status tracking lifecycle flags, display text, URLs,
    aliases and logs for a single named build step."""

    def __init__(self, name, build):
        mock.Mock.__init__(self)
        self.name = name
        self.build = build
        self.urls = {}
        self.aliases = {}
        self.text = None
        self.step = None
        self.logs = []
        self.started = False
        self.finished = False

    # --- lifecycle -------------------------------------------------------
    def stepStarted(self):
        self.started = True

    def isStarted(self):
        return self.started

    def stepFinished(self, status):
        """Mark finished and push the result onto the build's receivedStatus."""
        self.finished = True
        self.getBuild().receivedStatus.append(status)

    def isFinished(self):
        return self.finished

    # --- display text ----------------------------------------------------
    def setText(self, text):
        self.text = text

    def setText2(self, text):
        self.text = text

    # --- links -----------------------------------------------------------
    def addURL(self, label, url):
        self.urls[label] = url

    def getURLs(self):
        return dict(self.urls)

    def addAlias(self, label, url, text=None):
        self.aliases.setdefault(label, []).append((text, url))

    def getAliases(self):
        return dict(self.aliases)

    # --- logs ------------------------------------------------------------
    def addLog(self, log):
        new_log = FakeLog(log)
        self.logs.append(new_log)
        return new_log

    def getLogs(self):
        return self.logs

    def getLog(self, log):
        """Return the first log with the given name, or None."""
        for candidate in self.logs:
            if candidate.name == log:
                return candidate
        return None

    # --- misc buildbot API surface ---------------------------------------
    def getBuild(self):
        return self.build

    def setHidden(self, hidden):
        return None

    def isWaitingForLocks(self):
        return False
class FakeBuildStatus(mock.Mock):
    """Mock build status that manufactures FakeBuildstepStatus children
    and collects the statuses they report on completion."""

    def __init__(self):
        mock.Mock.__init__(self)
        self.steps = []
        self.receivedStatus = []
        self.logs = []

    def addStepWithName(self, step_name):
        """Create, register and return a step status for `step_name`."""
        step = FakeBuildstepStatus(step_name, self)
        self.steps.append(step)
        return step
class AnnotatorCommandsTest(unittest.TestCase):
def setUp(self):
self.buildstatus = FakeBuildStatus()
self.command = FakeCommand()
self.step = chromium_step.AnnotatedCommand(name='annotated_steps',
description='annotated_steps',
command=self.command)
self.step.build = FakeBuild(self.command)
self.step_status = self.buildstatus.addStepWithName('annotated_steps')
self.step.setStepStatus(self.step_status)
self.command.status = self.step_status
preamble = self.command.addLog('preamble')
self.step.script_observer.addSection('annotated_steps',
step=self.step_status)
self.step.script_observer.sections[0]['log'] = preamble
self.step.script_observer.sections[0]['started'] = time.time()
self.step.script_observer.cursor = self.step.script_observer.sections[0]
def handleOutputLine(self, line):
self.step.script_observer.cursor['step'].started = True
if not self.step.script_observer.cursor['log']:
self.step.script_observer.cursor['log'] = (
self.step.script_observer.cursor['step'].addLog('stdio'))
self.step.script_observer.cursor['started'] = time.time()
self.step.script_observer.handleOutputLine(line)
def handleReturnCode(self, code):
self.step.script_observer['step'].stepFinished()
self.step.script_observer.handleReturnCode(code)
def startNewStep(self, name='example_step'):
self.handleOutputLine('@@@SEED_STEP %s@@@' % name)
self.handleOutputLine('@@@SEED_STEP_TEXT@%s@example_text@@@' % name)
self.handleOutputLine('@@@STEP_CURSOR %s@@@' % name)
def assertNothingToWait(self):
self.assertEquals(0, len(self.step.script_observer.stepsToWait()))
def testAddAnnotatedSteps(self):
self.handleOutputLine('@@@BUILD_STEP step@@@')
self.handleOutputLine('@@@BUILD_STEP step2@@@')
self.handleOutputLine('@@@BUILD_STEP done@@@')
self.step.script_observer.handleReturnCode(0)
stepnames = [x['step'].name for x in self.step.script_observer.sections]
statuses = [x['status'] for x in self.step.script_observer.sections]
self.assertEquals(stepnames, ['annotated_steps', 'step', 'step2', 'done'])
self.assertEquals(statuses, 4 * [builder.SUCCESS])
self.assertEquals(self.step.script_observer.annotate_status,
builder.SUCCESS)
def testBuildFailure(self):
self.handleOutputLine('@@@STEP_FAILURE@@@')
self.handleOutputLine('@@@BUILD_STEP step@@@')
self.step.script_observer.handleReturnCode(0)
statuses = [x['status'] for x in self.step.script_observer.sections]
self.assertEquals(statuses, [builder.FAILURE, builder.SUCCESS])
self.assertEquals(self.step.script_observer.annotate_status,
builder.FAILURE)
def testBuildException(self):
self.handleOutputLine('@@@STEP_EXCEPTION@@@')
self.handleOutputLine('@@@BUILD_STEP step@@@')
statuses = [x['status'] for x in self.step.script_observer.sections]
self.assertEquals(statuses, [builder.EXCEPTION, builder.SUCCESS])
self.assertEquals(self.step.script_observer.annotate_status,
builder.EXCEPTION)
def testStepLink(self):
self.handleOutputLine('@@@STEP_LINK@label@http://localhost/@@@')
testurls = [('label', 'http://localhost/', None)]
testurl_hash = {'label': 'http://localhost/'}
annotatedLinks = [x['links'] for x in self.step.script_observer.sections]
stepLinks = [x['step'].getURLs() for x in
self.step.script_observer.sections]
self.assertEquals(annotatedLinks, [testurls])
self.assertEquals(stepLinks, [testurl_hash])
def testStepAlias(self):
self.handleOutputLine('@@@STEP_LINK@alias-->label@http://localhost/@@@')
testurls = [('label', 'http://localhost/', 'alias')]
testalias_hash = {'label': [('alias', 'http://localhost/')]}
annotatedLinks = [x['links'] for x in self.step.script_observer.sections]
stepAliases = [x['step'].getAliases() for x in
self.step.script_observer.sections]
self.assertEquals(annotatedLinks, [testurls])
self.assertEquals(stepAliases, [testalias_hash])
def testStepWarning(self):
self.handleOutputLine('@@@STEP_WARNINGS@@@')
self.handleOutputLine('@@@BUILD_STEP step@@@')
statuses = [x['status'] for x in self.step.script_observer.sections]
self.assertEquals(statuses, [builder.WARNINGS, builder.SUCCESS])
self.assertEquals(self.step.script_observer.annotate_status,
builder.WARNINGS)
def testStepText(self):
self.handleOutputLine('@@@STEP_TEXT@example_text@@@')
self.handleOutputLine('@@@BUILD_STEP step2@@@')
self.handleOutputLine('@@@STEP_TEXT@example_text2@@@')
self.handleOutputLine('@@@BUILD_STEP step3@@@')
self.handleOutputLine('@@@STEP_TEXT@example_text3@@@')
texts = [x['step_text'] for x in self.step.script_observer.sections]
self.assertEquals(texts, [['example_text'], ['example_text2'],
['example_text3']])
def testStepTextSeeded(self):
self.handleOutputLine('@@@SEED_STEP example_step@@@')
self.handleOutputLine('@@@SEED_STEP_TEXT@example_step@example_text@@@')
self.handleOutputLine('@@@STEP_CURSOR example_step@@@')
texts = [x['step_text'] for x in self.step.script_observer.sections]
start = [x['step'].isStarted() for x in self.step.script_observer.sections]
self.assertEquals(texts, [[], ['example_text']])
self.assertEquals(start, [True, False])
def testStepClear(self):
self.handleOutputLine('@@@STEP_TEXT@example_text@@@')
self.handleOutputLine('@@@BUILD_STEP step2@@@')
self.handleOutputLine('@@@STEP_TEXT@example_text2@@@')
self.handleOutputLine('@@@STEP_CLEAR@@@')
texts = [x['step_text'] for x in self.step.script_observer.sections]
self.assertEquals(texts, [['example_text'], []])
def testStepSummaryText(self):
self.handleOutputLine('@@@STEP_SUMMARY_TEXT@example_text@@@')
self.handleOutputLine('@@@BUILD_STEP step2@@@')
self.handleOutputLine('@@@STEP_SUMMARY_TEXT@example_text2@@@')
self.handleOutputLine('@@@BUILD_STEP step3@@@')
self.handleOutputLine('@@@STEP_SUMMARY_TEXT@example_text3@@@')
texts = [x['step_summary_text'] for x in self.step.script_observer.sections]
self.assertEquals(texts, [['example_text'], ['example_text2'],
['example_text3']])
def testStepSummaryClear(self):
self.handleOutputLine('@@@STEP_SUMMARY_TEXT@example_text@@@')
self.handleOutputLine('@@@BUILD_STEP step2@@@')
self.handleOutputLine('@@@STEP_SUMMARY_TEXT@example_text2@@@')
self.handleOutputLine('@@@STEP_SUMMARY_CLEAR@@@')
texts = [x['step_summary_text'] for x in self.step.script_observer.sections]
self.assertEquals(texts, [['example_text'], []])
def testHaltOnFailure(self):
self.step.deferred = defer.Deferred()
self.handleOutputLine('@@@HALT_ON_FAILURE@@@')
catchFailure = lambda r: self.assertEquals(
self.step_status.getBuild().receivedStatus,
[builder.FAILURE, builder.FAILURE])
self.step.deferred.addBoth(catchFailure)
self.startNewStep()
self.handleOutputLine('@@@STEP_FAILURE@@@')
self.assertEquals(self.step.script_observer.annotate_status,
builder.FAILURE)
def testReturnCode(self):
self.step.script_observer.handleReturnCode(1)
self.assertEquals(self.step.script_observer.annotate_status,
builder.FAILURE)
def testHonorZeroReturnCode(self):
self.handleOutputLine('@@@HONOR_ZERO_RETURN_CODE@@@')
self.startNewStep()
self.handleOutputLine('@@@STEP_FAILURE@@@')
self.step.script_observer.handleReturnCode(0)
self.assertEquals(self.step.script_observer.annotate_status,
builder.SUCCESS)
def testProperty(self):
self.handleOutputLine(
'@@@SET_BUILD_PROPERTY@cool@["option", 1, {"dog": "cat"}]@@@')
self.assertDictEqual(
self.step.build.properties,
{'cool':
(["option", 1, {"dog": "cat"}], 'Annotation(annotated_steps)', True)
}
)
self.handleOutputLine('@@@SET_BUILD_PROPERTY@cool@1@@@')
self.handleOutputLine('@@@BUILD_STEP@different_step@@@')
self.handleOutputLine('@@@SET_BUILD_PROPERTY@cool@"option2"@@@')
self.assertDictEqual(
self.step.build.properties,
{'cool': ('option2', 'Annotation(different_step)', True)}
)
def testLogLine(self):
self.handleOutputLine('@@@STEP_LOG_LINE@test_log@this is line one@@@')
self.handleOutputLine('@@@STEP_LOG_LINE@test_log@this is line two@@@')
self.handleOutputLine('@@@STEP_LOG_END@test_log@@@')
logs = self.step_status.getLogs()
self.assertEquals(len(logs), 2)
self.assertEquals(logs[1].getName(), 'test_log')
self.assertEquals(self.step_status.getLog('test_log').text,
'this is line one\nthis is line two')
def testForNoPreambleAfter1Step(self):
self.handleOutputLine('this line is part of the preamble')
self.step.scriptComplete(self.command)
logs = self.step_status.getLogs()
# buildbot will append 'stdio' for the first non-annotated section
# but it won't show up in self.step_status.getLogs()
self.assertEquals(len(logs), 0)
def testForPreambleAfter2Steps(self):
self.handleOutputLine('this line is part of the preamble')
self.handleOutputLine('@@@BUILD_STEP step2@@@')
self.step.scriptComplete(self.command)
logs = [l for x in self.buildstatus.steps for l in x.getLogs()]
# annotator adds a stdio for each buildstep added
self.assertEquals([x.getName() for x in logs], ['preamble', 'stdio'])
def testForPreambleAfter3Steps(self):
self.handleOutputLine('this line is part of the preamble')
self.handleOutputLine('@@@BUILD_STEP step2@@@')
self.handleOutputLine('@@@BUILD_STEP step3@@@')
self.step.scriptComplete(self.command)
logs = [l for x in self.buildstatus.steps for l in x.getLogs()]
self.assertEquals([x.getName() for x in logs], ['preamble', 'stdio',
'stdio'])
def testSeed(self):
self.handleOutputLine('@@@BUILD_STEP step@@@')
self.handleOutputLine('@@@SEED_STEP step2@@@')
self.handleOutputLine('@@@SEED_STEP step3@@@')
self.handleOutputLine('@@@SEED_STEP step4@@@')
self.handleOutputLine('@@@STEP_CURSOR step2@@@')
self.handleOutputLine('@@@STEP_STARTED@@@')
self.handleOutputLine('@@@STEP_CURSOR step3@@@')
self.step.script_observer.handleReturnCode(0)
stepnames = [x['step'].name for x in self.step.script_observer.sections]
started = [x['step'].isStarted() for x
in self.step.script_observer.sections]
finished = [x['step'].isFinished() for x in
self.step.script_observer.sections]
self.assertEquals(stepnames, ['annotated_steps', 'step', 'step2', 'step3',
'step4'])
self.assertEquals(started, [True, True, True, True, False])
self.assertEquals(finished, [False, True, True, True, False])
self.assertEquals(self.step.script_observer.annotate_status,
builder.SUCCESS)
self.assertNothingToWait()
def testCursor(self):
    """STEP_CURSOR interleaves log lines between two concurrently open steps."""
    script = ('@@@BUILD_STEP step@@@',
              '@@@SEED_STEP step2@@@',
              '@@@SEED_STEP step3@@@',
              '@@@SEED_STEP step4@@@',
              '@@@SEED_STEP step5@@@',
              '@@@STEP_CURSOR step2@@@',
              '@@@STEP_STARTED@@@',
              '@@@STEP_CURSOR step4@@@',
              '@@@STEP_STARTED@@@',
              '@@@STEP_LOG_LINE@test_log@AAthis is line one@@@',
              '@@@STEP_CURSOR step2@@@',
              '@@@STEP_LOG_LINE@test_log@BBthis is line one@@@',
              '@@@STEP_CURSOR step4@@@',
              '@@@STEP_LOG_LINE@test_log@AAthis is line two@@@',
              '@@@STEP_CURSOR step2@@@',
              '@@@STEP_LOG_LINE@test_log@BBthis is line two@@@',
              '@@@STEP_CURSOR step4@@@',
              '@@@STEP_LOG_END@test_log@@@',
              '@@@STEP_CURSOR step2@@@',
              '@@@STEP_LOG_END@test_log@@@',
              '@@@STEP_CURSOR step4@@@',
              '@@@STEP_CLOSED@@@',
              '@@@STEP_CURSOR step3@@@',
              '@@@STEP_STARTED@@@')
    for line in script:
        self.handleOutputLine(line)
    self.step.script_observer.handleReturnCode(0)

    sections = self.step.script_observer.sections
    stepnames = [s['step'].name for s in sections]
    started = [s['step'].isStarted() for s in sections]
    finished = [s['step'].isFinished() for s in sections]
    logs = [s['step'].logs for s in sections]

    self.assertEquals(stepnames, ['annotated_steps', 'step', 'step2', 'step3',
                                  'step4', 'step5'])
    self.assertEquals(started, [True, True, True, True, True, False])
    self.assertEquals(finished, [False, True, True, True, True, False])
    self.assertEquals(self.step.script_observer.annotate_status,
                      builder.SUCCESS)

    lognames = [[l.getName() for l in steplogs] for steplogs in logs]
    logtexts = [[l.text for l in steplogs] for steplogs in logs]
    expected_lognames = [['preamble'], ['stdio'],
                         ['stdio', 'test_log'],
                         ['stdio'],
                         ['stdio', 'test_log'],
                         []]
    self.assertEquals(lognames, expected_lognames)
    # Lines routed by cursor position: BB lines to step2, AA lines to step4.
    self.assertEquals(logtexts[1:], [
        [''],
        ['', 'BBthis is line one\nBBthis is line two'],
        [''],
        ['', 'AAthis is line one\nAAthis is line two'],
        []
    ])
    self.assertNothingToWait()
def testNeverEndingBuild(self):
    """Two seeded steps, each started and closed, leave nothing pending."""
    for name in ('step1', 'step2'):
        self.handleOutputLine('@@@SEED_STEP %s@@@' % name)
        self.handleOutputLine('@@@STEP_CURSOR %s@@@' % name)
        self.handleOutputLine('@@@STEP_STARTED@@@')
        self.handleOutputLine('@@@STEP_CLOSED@@@')
    self.step.script_observer.handleReturnCode(0)
    self.assertNothingToWait()
def testNeverEndingFailedBuild(self):
    """A trailing STEP_FAILURE (instead of CLOSED) still cleans up fully."""
    script = ('@@@SEED_STEP step1@@@',
              '@@@STEP_CURSOR step1@@@',
              '@@@STEP_STARTED@@@',
              '@@@STEP_CLOSED@@@',
              '@@@SEED_STEP step2@@@',
              '@@@STEP_CURSOR step2@@@',
              '@@@STEP_STARTED@@@',
              '@@@STEP_FAILURE@@@')
    for line in script:
        self.handleOutputLine(line)
    self.step.script_observer.handleReturnCode(0)
    self.assertNothingToWait()
def testNeverEndingBuildWithBuildStep(self):
    """Legacy BUILD_STEP-only scripts terminate cleanly on return code."""
    for name in ('step', 'step1', 'step2'):
        self.handleOutputLine('@@@BUILD_STEP %s@@@' % name)
    self.step.script_observer.handleReturnCode(0)
    self.assertNothingToWait()
def testNeverEndingBuildStepsWithFailedBuildStep(self):
    """A failed final legacy BUILD_STEP still terminates cleanly."""
    for name in ('step', 'step1', 'step2'):
        self.handleOutputLine('@@@BUILD_STEP %s@@@' % name)
    self.handleOutputLine('@@@STEP_FAILURE@@@')
    self.step.script_observer.handleReturnCode(0)
    self.assertNothingToWait()
def testCannotClosePreamble(self):
    """STEP_CLOSED with no step open would close the preamble: must raise."""
    self.assertRaises(ValueError, self.handleOutputLine, '@@@STEP_CLOSED@@@')
def testCannotClosePreambleUsingDoubleStepClose(self):
    """A second STEP_CLOSED would target the preamble and must raise."""
    self.startNewStep()
    self.handleOutputLine('@@@STEP_CLOSED@@@')
    self.assertRaises(ValueError, self.handleOutputLine, '@@@STEP_CLOSED@@@')
def testPreambleNotClosedOnReturnCode(self):
    """The implicit preamble section stays open after the script returns."""
    self.startNewStep()
    self.handleOutputLine('@@@STEP_CLOSED@@@')
    self.step.script_observer.handleReturnCode(0)
    # sections[0] is always the implicit preamble section.
    preamble_step = self.step.script_observer.sections[0]['step']
    self.assertTrue(preamble_step.isStarted())
    self.assertFalse(preamble_step.isFinished())
def testStopBuild(self):
    """interrupt() on a running annotated step must not raise."""
    self.startNewStep()
    self.step.interrupt('it is time')
def testHandleRealOutput(self):
    """Replay a captured annotator stdio log and check the parsed step names."""
    with open(os.path.join(test_env.DATA_PATH,
                           'chromium_fyi_android_annotator_stdio')) as f:
        for line in f.readlines():
            self.handleOutputLine(line.rstrip())
    stepnames = [x['step'].name for x in self.step.script_observer.sections]
    # The trailing space in 'Experimental Compile ...' is part of the
    # recorded step name, not a typo.
    self.assertEquals(stepnames, ['annotated_steps',
                                  'Environment setup',
                                  'Check licenses for WebView',
                                  'compile',
                                  'Experimental Compile android_experimental ',
                                  'Zip build'])
def testRealOutputBuildStepSeedStep(self):
    """Replaying a capture that mixes BUILD_STEP and SEED_STEP must not raise."""
    with open(os.path.join(test_env.DATA_PATH,
                           'build_step_seed_step_annotator.txt')) as f:
        for line in f.readlines():
            self.handleOutputLine(line.rstrip())
def testMixingStepCursorWithLegacyBuildStep(self):
    """Cursor moves back to a seeded step after legacy BUILD_STEPs: no error."""
    script = ('@@@SEED_STEP annotated_steps@@@',
              '@@@STEP_CURSOR annotated_steps@@@',
              '@@@STEP_STARTED@@@',
              '@@@BUILD_STEP step1@@@',
              '@@@BUILD_STEP step2@@@',
              '@@@STEP_CURSOR annotated_steps@@@',
              '@@@STEP_CURSOR annotated_steps@@@',
              '@@@STEP_CLOSED@@@')
    for line in script:
        self.handleOutputLine(line)
    self.step.script_observer.handleReturnCode(0)
    self.assertNothingToWait()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
gnieboer/gnuradio | refs/heads/android | gr-qtgui/apps/plot_time_base.py | 47 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blocks
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import os, sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
print "Error: Program requires PyQt4 and gr-qtgui."
sys.exit(1)
try:
import scipy
except ImportError:
print "Error: Scipy required (www.scipy.org)."
sys.exit(1)
try:
from gnuradio.qtgui.plot_form import *
from gnuradio.qtgui.plot_base import *
except ImportError:
from plot_form import *
from plot_base import *
class plot_base(gr.top_block):
    """Common flowgraph logic for the qtgui file-plotting apps.

    NOTE(review): concrete subclasses appear to provide ``self.dsize``
    (item size for skiphead), ``self.src_type`` (vector-source
    constructor), ``self.gui_snk`` (qtgui sink) and ``self.read_samples``
    before ``setup()`` is called -- inferred from usage below; confirm
    against the concrete plot_* subclasses.
    """
    def __init__(self, filelist, samp_rate, start,
                 nsamples, max_nsamples,
                 auto_scale):
        gr.top_block.__init__(self)

        self._filelist = filelist        # one input file per plotted signal
        self._samp_rate = samp_rate
        self._center_freq = 0
        self._start = start              # sample offset to start plotting at
        self._max_nsamps = max_nsamples
        self._nsigs = len(self._filelist)
        self._auto_scale = auto_scale
        self._nsamps = nsamples
        self._is_setup = False

        # y-axis state consumed by the Qt control form
        self._y_min = -20
        self._y_max = 20
        self._y_range = 2
        self._y_value = 1
        self.gui_y_axis = None           # callback(min, max), set by the form

        self.qapp = QtGui.QApplication(sys.argv)

    def setup(self):
        """Read all files, build/connect the flowgraph, wrap the Qt widget."""
        self.skip = blocks.skiphead(self.dsize, self._start)

        n = 0
        self.srcs = list()
        # Track the global data min/max across all files for auto-scaling.
        self._data_min = sys.maxint
        self._data_max = -sys.maxint - 1
        for f in self._filelist:
            data,_min,_max = self.read_samples(f, self._start, self._nsamps)
            if(_min < self._data_min):
                self._data_min = _min
            if(_max > self._data_max):
                self._data_max = _max

            self.srcs.append(self.src_type(data))

            # Set default labels based on file names
            fname = f.split("/")[-1]
            if(type(self.gui_snk) == qtgui.time_sink_c_sptr):
                # Complex sinks display two curves (I and Q) per file.
                self.gui_snk.set_line_label(n, "Re{{{0}}}".format(fname))
                self.gui_snk.set_line_label(n+1, "Im{{{0}}}".format(fname))
                n += 2
            else:
                self.gui_snk.set_line_label(n, "{0}".format(fname))
                n += 1

        # NOTE(review): only srcs[0] passes through skiphead even though
        # read_samples() already honors self._start -- looks asymmetric;
        # confirm intended behavior.
        self.connect(self.srcs[0], self.skip)
        self.connect(self.skip, (self.gui_snk, 0))
        for i,s in enumerate(self.srcs[1:]):
            self.connect(s, (self.gui_snk, i+1))

        self.gui_snk.set_update_time(0)
        self.gui_snk.enable_menu(False)

        self.auto_scale(self._auto_scale)

        # Get Python Qt references
        pyQt = self.gui_snk.pyqwidget()
        self.pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)

        self._is_setup = True

    def is_setup(self):
        """True once setup() has completed."""
        return self._is_setup

    def set_y_axis(self, y_min, y_max):
        """Forward new y-axis limits to the sink; return them unchanged."""
        self.gui_snk.set_y_axis(y_min, y_max)
        return y_min, y_max

    def get_gui(self):
        """Return the wrapped Qt widget, or None before setup()."""
        if(self.is_setup()):
            return self.pyWin
        else:
            return None

    def reset(self, newstart, newnsamps):
        """Re-read all files at a new offset/length and restart the graph."""
        self.stop()
        self.wait()

        self._start = newstart

        self._data_min = sys.maxint
        self._data_max = -sys.maxint - 1
        for s,f in zip(self.srcs, self._filelist):
            data,_min,_max = self.read_samples(f, self._start, newnsamps)
            if(_min < self._data_min):
                self._data_min = _min
            if(_max > self._data_max):
                self._data_max = _max

            s.set_data(data)
            # Shrink the display window if a file ran out of samples.
            if(len(data) < newnsamps):
                newnsamps = len(data)

        self.auto_scale(self._auto_scale)
        self._nsamps = newnsamps
        self.gui_snk.set_nsamps(self._nsamps)

        self.start()

    def auto_scale(self, state):
        """Enable/disable y-axis auto-scaling from the observed data range."""
        if(state > 0):
            self.gui_snk.set_y_axis(self._data_min, self._data_max)
            self._auto_scale = True
            self._y_value = self._data_max
            self._y_range = self._data_max - self._data_min
            # Give the manual controls 10x headroom around the data range.
            self._y_min = 10*self._data_min
            self._y_max = 10*self._data_max

            if(self.gui_y_axis):
                self.gui_y_axis(self._data_min, self._data_max)
        else:
            self._auto_scale = False
def setup_options(desc):
    """Build the shared OptionParser for the plotting apps and parse argv.

    Returns (options, args); prints usage and exits when no file
    arguments are given.
    """
    parser = OptionParser(option_class=eng_option, description=desc,
                          conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=1000000,
                      help="Set the number of samples to display [default=%default]")
    parser.add_option("-S", "--start", type="int", default=0,
                      help="Starting sample number [default=%default]")
    parser.add_option("-r", "--sample-rate", type="eng_float", default=1.0,
                      help="Set the sample rate of the signal [default=%default]")
    parser.add_option("", "--no-auto-scale", action="store_true", default=False,
                      help="Do not auto-scale the plot [default=%default]")

    options, args = parser.parse_args()

    # At least one data file is required on the command line.
    if not args:
        parser.print_help()
        sys.exit(0)

    return options, args
|
aruizramon/alec_erpnext | refs/heads/master | erpnext/stock/doctype/serial_no/serial_no.py | 6 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr, flt, add_days, nowdate, getdate
from frappe import _, ValidationError
from erpnext.controllers.stock_controller import StockController
# Fine-grained exception types so callers can catch specific serial-no
# validation failures individually.
class SerialNoCannotCreateDirectError(ValidationError): pass
class SerialNoCannotCannotChangeError(ValidationError): pass  # sic: historic double 'Cannot' kept for compatibility
class SerialNoNotRequiredError(ValidationError): pass
class SerialNoRequiredError(ValidationError): pass
class SerialNoQtyError(ValidationError): pass
class SerialNoItemError(ValidationError): pass
class SerialNoWarehouseError(ValidationError): pass
class SerialNoNotExistsError(ValidationError): pass
class SerialNoDuplicateError(ValidationError): pass
class SerialNo(StockController):
    """DocType controller for an individual serial number.

    Purchase/delivery details are not edited directly: they are derived
    from Stock Ledger Entries whenever ``via_stock_ledger`` is set.
    """
    def __init__(self, arg1, arg2=None):
        super(SerialNo, self).__init__(arg1, arg2)
        # True only while save() is triggered from stock-ledger processing;
        # relaxes the immutability checks in validate_warehouse().
        self.via_stock_ledger = False

    def validate(self):
        # A brand-new serial no must not carry a warehouse; it is assigned
        # later by stock transactions (see make_serial_no()).
        if self.get("__islocal") and self.warehouse:
            frappe.throw(_("New Serial No cannot have Warehouse. Warehouse must be set by Stock Entry or Purchase Receipt"), SerialNoCannotCreateDirectError)

        self.set_maintenance_status()
        self.validate_warehouse()
        self.validate_item()
        self.on_stock_ledger_entry()

    def set_maintenance_status(self):
        """Derive maintenance_status from warranty/AMC expiry dates.

        The if-chain is deliberately unguarded: later conditions win, so
        'Under AMC'/'Under Warranty' override the 'Out of ...' states.
        """
        if not self.warranty_expiry_date and not self.amc_expiry_date:
            self.maintenance_status = None

        if self.warranty_expiry_date and getdate(self.warranty_expiry_date) < getdate(nowdate()):
            self.maintenance_status = "Out of Warranty"

        if self.amc_expiry_date and getdate(self.amc_expiry_date) < getdate(nowdate()):
            self.maintenance_status = "Out of AMC"

        if self.amc_expiry_date and getdate(self.amc_expiry_date) >= getdate(nowdate()):
            self.maintenance_status = "Under AMC"

        if self.warranty_expiry_date and getdate(self.warranty_expiry_date) >= getdate(nowdate()):
            self.maintenance_status = "Under Warranty"

    def validate_warehouse(self):
        """Item code and warehouse may only change via stock-ledger updates."""
        if not self.get("__islocal"):
            item_code, warehouse = frappe.db.get_value("Serial No",
                self.name, ["item_code", "warehouse"])
            if not self.via_stock_ledger and item_code != self.item_code:
                frappe.throw(_("Item Code cannot be changed for Serial No."),
                    SerialNoCannotCannotChangeError)
            if not self.via_stock_ledger and warehouse != self.warehouse:
                frappe.throw(_("Warehouse cannot be changed for Serial No."),
                    SerialNoCannotCannotChangeError)

    def validate_item(self):
        """
        Validate whether serial no is required for this item
        """
        item = frappe.get_doc("Item", self.item_code)
        if item.has_serial_no!=1:
            frappe.throw(_("Item {0} is not setup for Serial Nos. Check Item master").format(self.item_code))

        # Mirror descriptive fields from the Item master.
        self.item_group = item.item_group
        self.description = item.description
        self.item_name = item.item_name
        self.brand = item.brand
        self.warranty_period = item.warranty_period

    def set_purchase_details(self, purchase_sle):
        """Fill (or clear) purchase fields from the incoming ledger entry."""
        if purchase_sle:
            self.purchase_document_type = purchase_sle.voucher_type
            self.purchase_document_no = purchase_sle.voucher_no
            self.purchase_date = purchase_sle.posting_date
            self.purchase_time = purchase_sle.posting_time
            self.purchase_rate = purchase_sle.incoming_rate
            if purchase_sle.voucher_type == "Purchase Receipt":
                self.supplier, self.supplier_name = \
                    frappe.db.get_value("Purchase Receipt", purchase_sle.voucher_no,
                        ["supplier", "supplier_name"])
        else:
            # No incoming entry: wipe all purchase-related fields.
            for fieldname in ("purchase_document_type", "purchase_document_no",
                "purchase_date", "purchase_time", "purchase_rate", "supplier", "supplier_name"):
                self.set(fieldname, None)

    def set_sales_details(self, delivery_sle):
        """Fill (or clear) delivery fields; warranty expiry starts at delivery."""
        if delivery_sle:
            self.delivery_document_type = delivery_sle.voucher_type
            self.delivery_document_no = delivery_sle.voucher_no
            self.delivery_date = delivery_sle.posting_date
            self.delivery_time = delivery_sle.posting_time
            if delivery_sle.voucher_type in ("Delivery Note", "Sales Invoice"):
                self.customer, self.customer_name = \
                    frappe.db.get_value(delivery_sle.voucher_type, delivery_sle.voucher_no,
                        ["customer", "customer_name"])
            if self.warranty_period:
                self.warranty_expiry_date = add_days(cstr(delivery_sle.posting_date),
                    cint(self.warranty_period))
        else:
            for fieldname in ("delivery_document_type", "delivery_document_no",
                "delivery_date", "delivery_time", "customer", "customer_name",
                "warranty_expiry_date"):
                self.set(fieldname, None)

    def get_last_sle(self):
        """Return {'purchase_sle', 'last_sle', 'delivery_sle'} for this serial.

        If there are more incoming than outgoing entries the serial no is
        still in stock, so the newest incoming entry is the 'last' one;
        otherwise the newest outgoing entry is both 'last' and 'delivery'.
        """
        entries = {}
        sle_dict = self.get_stock_ledger_entries()
        if sle_dict:
            if sle_dict.get("incoming", []):
                entries["purchase_sle"] = sle_dict["incoming"][0]

            if len(sle_dict.get("incoming", [])) - len(sle_dict.get("outgoing", [])) > 0:
                entries["last_sle"] = sle_dict["incoming"][0]
            else:
                entries["last_sle"] = sle_dict["outgoing"][0]
                entries["delivery_sle"] = sle_dict["outgoing"][0]

        return entries

    def get_stock_ledger_entries(self):
        """Group this serial no's ledger entries into incoming/outgoing lists.

        The SQL LIKE is only a coarse pre-filter; exact membership is
        re-checked in Python via get_serial_nos().
        """
        sle_dict = {}
        for sle in frappe.db.sql("""select * from `tabStock Ledger Entry`
            where serial_no like %s and item_code=%s and ifnull(is_cancelled, 'No')='No'
            order by posting_date desc, posting_time desc, name desc""",
            ("%%%s%%" % self.name, self.item_code), as_dict=1):
            if self.name.upper() in get_serial_nos(sle.serial_no):
                if sle.actual_qty > 0:
                    sle_dict.setdefault("incoming", []).append(sle)
                else:
                    sle_dict.setdefault("outgoing", []).append(sle)

        return sle_dict

    def on_trash(self):
        """Block deletion once the serial no appears in any ledger entry."""
        sl_entries = frappe.db.sql("""select serial_no from `tabStock Ledger Entry`
            where serial_no like %s and item_code=%s and ifnull(is_cancelled, 'No')='No'""",
            ("%%%s%%" % self.name, self.item_code), as_dict=True)

        # Find the exact match
        sle_exists = False
        for d in sl_entries:
            if self.name.upper() in get_serial_nos(d.serial_no):
                sle_exists = True
                break

        if sle_exists:
            frappe.throw(_("Cannot delete Serial No {0}, as it is used in stock transactions").format(self.name))

    def before_rename(self, old, new, merge=False):
        """Serial nos must stay unique documents; merging is disallowed."""
        if merge:
            frappe.throw(_("Sorry, Serial Nos cannot be merged"))

    def after_rename(self, old, new, merge=False):
        """rename serial_no text fields"""
        # Rewrite the old name inside every Text 'serial_no' field that
        # referenced it (these fields hold newline-separated lists).
        for dt in frappe.db.sql("""select parent from tabDocField
            where fieldname='serial_no' and fieldtype='Text'"""):
            for item in frappe.db.sql("""select name, serial_no from `tab%s`
                where serial_no like '%%%s%%'""" % (dt[0], frappe.db.escape(old))):
                serial_nos = map(lambda i: i==old and new or i, item[1].split('\n'))
                frappe.db.sql("""update `tab%s` set serial_no = %s
                    where name=%s""" % (dt[0], '%s', '%s'),
                    ('\n'.join(serial_nos), item[0]))

    def on_stock_ledger_entry(self):
        """Refresh purchase/sales details from the latest ledger entries."""
        if self.via_stock_ledger and not self.get("__islocal"):
            last_sle = self.get_last_sle()
            self.set_purchase_details(last_sle.get("purchase_sle"))
            self.set_sales_details(last_sle.get("delivery_sle"))
            self.set_maintenance_status()
def process_serial_no(sle):
    """Validate and apply serial-number changes for one stock ledger entry."""
    details = get_item_details(sle.item_code)
    validate_serial_no(sle, details)
    update_serial_nos(sle, details)
def validate_serial_no(sle, item_det):
    """Validate the serial numbers on a stock ledger entry against the item.

    Raises one of the SerialNo*Error subclasses when the entry's serial
    numbers are missing, superfluous, duplicated, or inconsistent with
    the existing Serial No documents.
    """
    if item_det.has_serial_no==0:
        # Non-serialized item: the serial_no field must stay empty.
        if sle.serial_no:
            frappe.throw(_("Item {0} is not setup for Serial Nos. Column must be blank").format(sle.item_code),
                SerialNoNotRequiredError)
    else:
        if sle.serial_no:
            serial_nos = get_serial_nos(sle.serial_no)

            # Serialized quantities must be whole numbers.
            if cint(sle.actual_qty) != flt(sle.actual_qty):
                frappe.throw(_("Serial No {0} quantity {1} cannot be a fraction").format(sle.item_code, sle.actual_qty))

            # One serial number per unit of quantity.
            if len(serial_nos) and len(serial_nos) != abs(cint(sle.actual_qty)):
                frappe.throw(_("{0} Serial Numbers required for Item {1}. You have provided {2}.").format(sle.actual_qty, sle.item_code, len(serial_nos)),
                    SerialNoQtyError)

            if len(serial_nos) != len(set(serial_nos)):
                frappe.throw(_("Duplicate Serial No entered for Item {0}").format(sle.item_code), SerialNoDuplicateError)

            for serial_no in serial_nos:
                if frappe.db.exists("Serial No", serial_no):
                    sr = frappe.get_doc("Serial No", serial_no)

                    if sr.item_code!=sle.item_code:
                        # Manufacture/Repack may legitimately reuse a serial
                        # no across raw material and finished good.
                        if not allow_serial_nos_with_different_item(serial_no, sle):
                            frappe.throw(_("Serial No {0} does not belong to Item {1}").format(serial_no,
                                sle.item_code), SerialNoItemError)

                    # Incoming: a serial no already in a warehouse cannot be
                    # received again.
                    if sr.warehouse and sle.actual_qty > 0:
                        frappe.throw(_("Serial No {0} has already been received").format(serial_no),
                            SerialNoDuplicateError)

                    if sle.actual_qty < 0:
                        # Outgoing: the serial no must leave from the same
                        # warehouse the entry refers to.
                        if sr.warehouse!=sle.warehouse:
                            frappe.throw(_("Serial No {0} does not belong to Warehouse {1}").format(serial_no,
                                sle.warehouse), SerialNoWarehouseError)

                        if sle.voucher_type in ("Delivery Note", "Sales Invoice") \
                            and sle.is_cancelled=="No" and not sr.warehouse:
                            frappe.throw(_("Serial No {0} does not belong to any Warehouse")
                                .format(serial_no), SerialNoWarehouseError)

                elif sle.actual_qty < 0:
                    # transfer out
                    frappe.throw(_("Serial No {0} not in stock").format(serial_no), SerialNoNotExistsError)

        elif sle.actual_qty < 0 or not item_det.serial_no_series:
            # Serial numbers are mandatory unless an incoming entry can
            # auto-generate them from the item's naming series.
            frappe.throw(_("Serial Nos Required for Serialized Item {0}").format(sle.item_code),
                SerialNoRequiredError)
def allow_serial_nos_with_different_item(sle_serial_no, sle):
    """
    Allows same serial nos for raw materials and finished goods
    in Manufacture / Repack type Stock Entry
    """
    # Only incoming rows of a Stock Entry can qualify.
    if sle.voucher_type != "Stock Entry" or sle.actual_qty <= 0:
        return False

    stock_entry = frappe.get_doc("Stock Entry", sle.voucher_no)
    if stock_entry.purpose not in ("Repack", "Manufacture"):
        return False

    # The serial no must appear on a row consumed from the opposite side
    # (source warehouse normally; target warehouse when cancelling).
    for item in stock_entry.get("items"):
        counterpart_wh = item.s_warehouse if sle.is_cancelled == "No" else item.t_warehouse
        if item.serial_no and counterpart_wh and sle_serial_no in get_serial_nos(item.serial_no):
            return True

    return False
def update_serial_nos(sle, item_det):
    """Create or refresh Serial No documents for a validated ledger entry.

    If the incoming entry has no serial numbers but the item defines a
    naming series, the serial numbers are auto-generated first and the
    entry re-validated.
    """
    if sle.is_cancelled == "No" and not sle.serial_no and sle.actual_qty > 0 \
        and item_det.has_serial_no == 1 and item_det.serial_no_series:
        from frappe.model.naming import make_autoname
        serial_nos = []
        for i in xrange(cint(sle.actual_qty)):
            serial_nos.append(make_autoname(item_det.serial_no_series, "Serial No"))
        frappe.db.set(sle, "serial_no", "\n".join(serial_nos))
        # Re-validate now that serial numbers exist on the entry.
        validate_serial_no(sle, item_det)
    if sle.serial_no:
        serial_nos = get_serial_nos(sle.serial_no)
        for serial_no in serial_nos:
            if frappe.db.exists("Serial No", serial_no):
                sr = frappe.get_doc("Serial No", serial_no)
                sr.via_stock_ledger = True
                sr.item_code = sle.item_code
                # Outgoing entries clear the warehouse (serial leaves stock).
                sr.warehouse = sle.warehouse if sle.actual_qty > 0 else None
                sr.save(ignore_permissions=True)
            elif sle.actual_qty > 0:
                make_serial_no(serial_no, sle)
def get_item_details(item_code):
    """Fetch the serial-no-relevant fields of an Item as a single dict."""
    rows = frappe.db.sql("""select name, has_batch_no, docstatus,
        is_stock_item, has_serial_no, serial_no_series
        from tabItem where name=%s""", item_code, as_dict=True)
    return rows[0]
def get_serial_nos(serial_no):
    """Parse a serial-number text field into a list of upper-cased entries.

    Accepts comma- and/or newline-separated values; blank entries are
    dropped.
    """
    normalized = cstr(serial_no).strip().upper().replace(',', '\n')
    return [token.strip() for token in normalized.split('\n') if token.strip()]
def make_serial_no(serial_no, sle):
    """Create a new Serial No document for an incoming ledger entry.

    The document is first inserted without a warehouse (validate() rejects
    a warehouse on brand-new serial nos) and the warehouse is assigned in
    a second save, with via_stock_ledger relaxing the immutability check.
    """
    sr = frappe.new_doc("Serial No")
    sr.warehouse = None
    sr.dont_update_if_missing.append("warehouse")
    sr.flags.ignore_permissions = True

    sr.serial_no = serial_no
    sr.item_code = sle.item_code
    sr.company = sle.company
    sr.via_stock_ledger = True
    sr.insert()

    sr.warehouse = sle.warehouse
    sr.save()

    frappe.msgprint(_("Serial No {0} created").format(sr.name))
    return sr.name
def update_serial_nos_after_submit(controller, parentfield):
    """Copy auto-generated serial numbers from the stock ledger back onto
    the transaction's child rows after submit.

    Child rows are matched to ledger entries by voucher_detail_no plus
    quantity/warehouse; Purchase Receipts may additionally carry a second
    entry for the rejected quantity.
    """
    stock_ledger_entries = frappe.db.sql("""select voucher_detail_no, serial_no, actual_qty, warehouse
        from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""",
        (controller.doctype, controller.name), as_dict=True)

    if not stock_ledger_entries: return

    for d in controller.get(parentfield):
        # Rejected serial nos only exist for Purchase Receipts with a
        # rejected quantity.
        update_rejected_serial_nos = True if (controller.doctype=="Purchase Receipt" and d.rejected_qty) else False
        accepted_serial_nos_updated = False
        # Stock Entries store the destination in t_warehouse.
        warehouse = d.t_warehouse if controller.doctype == "Stock Entry" else d.warehouse

        for sle in stock_ledger_entries:
            if sle.voucher_detail_no==d.name:
                if not accepted_serial_nos_updated and d.qty and abs(sle.actual_qty)==d.qty \
                    and sle.warehouse == warehouse and sle.serial_no != d.serial_no:
                    d.serial_no = sle.serial_no
                    frappe.db.set_value(d.doctype, d.name, "serial_no", sle.serial_no)
                    accepted_serial_nos_updated = True
                    if not update_rejected_serial_nos:
                        break
                elif update_rejected_serial_nos and abs(sle.actual_qty)==d.rejected_qty \
                    and sle.warehouse == d.rejected_warehouse and sle.serial_no != d.rejected_serial_no:
                    d.rejected_serial_no = sle.serial_no
                    frappe.db.set_value(d.doctype, d.name, "rejected_serial_no", sle.serial_no)
                    update_rejected_serial_nos = False
                    if accepted_serial_nos_updated:
                        break
|
roadmapper/ansible | refs/heads/devel | test/units/regex/test_invalid_var_names.py | 83 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible import constants as C
# (raw_name, characters INVALID_VARIABLE_NAMES should find, sanitized form)
test_cases = (('not-valid', ['-'], 'not_valid'), ('not!valid@either', ['!', '@'], 'not_valid_either'), ('1_nor_This', ['1'], '__nor_This'))
class TestInvalidVars(unittest.TestCase):
    """Exercise the C.INVALID_VARIABLE_NAMES regex against known samples."""

    def test_positive_matches(self):
        # Each invalid name yields exactly its offending characters.
        for raw, bad_chars, _sanitized in test_cases:
            self.assertEqual(C.INVALID_VARIABLE_NAMES.findall(raw), bad_chars)

    def test_negative_matches(self):
        # Valid identifiers produce no matches at all.
        for valid_name in ('this_is_valid', 'Also_1_valid', 'noproblem'):
            self.assertEqual(C.INVALID_VARIABLE_NAMES.findall(valid_name), [])

    def test_get_setting(self):
        # Substituting '_' for matches yields the sanitized form.
        for raw, _bad_chars, sanitized in test_cases:
            self.assertEqual(C.INVALID_VARIABLE_NAMES.sub('_', raw), sanitized)
|
adrienbrault/home-assistant | refs/heads/dev | tests/components/flo/test_binary_sensor.py | 5 | """Test Flo by Moen binary sensor entities."""
from homeassistant.components.flo.const import DOMAIN as FLO_DOMAIN
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
CONF_PASSWORD,
CONF_USERNAME,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
from .common import TEST_PASSWORD, TEST_USER_ID
async def test_binary_sensors(hass, config_entry, aioclient_mock_fixture):
    """Test Flo by Moen sensors."""
    config_entry.add_to_hass(hass)
    assert await async_setup_component(
        hass, FLO_DOMAIN, {CONF_USERNAME: TEST_USER_ID, CONF_PASSWORD: TEST_PASSWORD}
    )
    await hass.async_block_till_done()
    # The mocked Flo account exposes two devices.
    assert len(hass.data[FLO_DOMAIN][config_entry.entry_id]["devices"]) == 2

    # Pending-alerts sensor is on, with per-severity counts as attributes.
    valve_state = hass.states.get("binary_sensor.pending_system_alerts")
    assert valve_state.state == STATE_ON
    assert valve_state.attributes.get("info") == 0
    assert valve_state.attributes.get("warning") == 2
    assert valve_state.attributes.get("critical") == 0
    assert valve_state.attributes.get(ATTR_FRIENDLY_NAME) == "Pending System Alerts"

    # Water-detected sensor reports dry (off) for the mocked data.
    detector_state = hass.states.get("binary_sensor.water_detected")
    assert detector_state.state == STATE_OFF
|
bolabola/PTVS | refs/heads/master | Python/Tests/TestData/DebugAttach/Simple.py | 18 | from threading import Thread, current_thread, Lock
from time import sleep
# Per-thread "please report progress" flags (one slot per thread id);
# writes and the report printout are guarded by progress_lock.
report_progress_now = []
progress_lock = Lock()
def check_report_progress(me, id):
    """Print a one-off progress message for thread *me* if slot *id* is flagged.

    The main thread sets every slot of report_progress_now (under
    progress_lock) on KeyboardInterrupt; each worker polls its own slot
    and reports once.
    """
    global report_progress_now, progress_lock
    # The unlocked pre-check is a cheap fast path; each worker owns its
    # own slot, so a stale read only delays the report by one iteration.
    if report_progress_now[id]:
        # 'with' guarantees the lock is released even if print() raises;
        # the previous explicit acquire()/release() pair could leak the
        # lock on an exception between the two calls.
        with progress_lock:
            print("{} [{}] is making progress.".format(me.name, me.ident))
            report_progress_now[id] = False
def exception_spam(id):
    """Worker loop: raise and swallow exceptions as fast as possible.

    Stresses the attached debugger's exception-handling path.
    Never returns.
    """
    me = current_thread()
    while True:
        try:
            raise Exception()
        except Exception:
            pass
        check_report_progress(me, id)
def sleep_forever(id):
    """Worker loop: spend most time blocked in sleep(); report every 10 s.

    Never returns.
    """
    me = current_thread()
    while True:
        sleep(10)
        check_report_progress(me, id)
def busy_loop(id):
    """Worker loop: burn CPU with trivial arithmetic. Never returns."""
    me = current_thread()
    i = 0
    while True:
        i = (i % 100000000) + 1
        check_report_progress(me, id)
        # if i % 10000000 == 0: raise Exception()
if __name__ == '__main__':
    num_threads = 10
    thread_list = []
    # Workers run exception_spam; the main thread runs busy_loop.
    thread_fun, main_fun = exception_spam, busy_loop

    for i in range(num_threads):
        thread_list.append(Thread(target=thread_fun,args=(i,)))
        report_progress_now.append(True)

    for t in thread_list:
        t.start()

    # Extra flag slot for the main thread itself (id == num_threads).
    report_progress_now.append(True)
    me, id = current_thread(), num_threads
    while True:
        try:
            main_fun(id)
        except KeyboardInterrupt:
            # Ctrl+C: ask every thread (workers and main) to report once.
            progress_lock.acquire()
            for i, _ in enumerate(report_progress_now):
                report_progress_now[i] = True
            progress_lock.release()
Leoniela/nipype | refs/heads/master | examples/fmri_fsl_reuse.py | 14 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
=========================
fMRI: FSL reuse workflows
=========================
A workflow that uses fsl to perform a first level analysis on the nipype
tutorial data set::
python fmri_fsl_reuse.py
First tell python where to find the appropriate functions.
"""
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.algorithms.modelgen as model # model generation
import nipype.algorithms.rapidart as ra # artifact detection
from nipype.workflows.fmri.fsl import (create_featreg_preproc,
create_modelfit_workflow,
create_fixed_effects_flow)
"""
Preliminaries
-------------
Setup any package specific configuration. The output file format for FSL
routines is being set to compressed NIFTI.
"""
# Default every FSL interface to compressed NIFTI output.
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

level1_workflow = pe.Workflow(name='level1flow')

# Canned FSL sub-workflows: preprocessing, first-level GLM, fixed effects.
preproc = create_featreg_preproc(whichvol='first')

modelfit = create_modelfit_workflow()

fixed_fx = create_fixed_effects_flow()
"""
Add artifact detection and model specification nodes between the preprocessing
and modelfitting workflows.
"""
# Motion/intensity outlier detection over each realigned run (MapNode).
art = pe.MapNode(interface=ra.ArtifactDetect(use_differences = [True, False],
                                             use_norm = True,
                                             norm_threshold = 1,
                                             zintensity_threshold = 3,
                                             parameter_source = 'FSL',
                                             mask_type = 'file'),
                 iterfield=['realigned_files', 'realignment_parameters', 'mask_file'],
                 name="art")

modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")

# Wire preprocessing outputs into artifact detection and model
# specification, and feed the highpassed data into the model fit.
level1_workflow.connect([(preproc, art, [('outputspec.motion_parameters',
                                          'realignment_parameters'),
                                         ('outputspec.realigned_files',
                                          'realigned_files'),
                                         ('outputspec.mask', 'mask_file')]),
                         (preproc, modelspec, [('outputspec.highpassed_files',
                                                'functional_runs'),
                                               ('outputspec.motion_parameters',
                                                'realignment_parameters')]),
                         (art, modelspec, [('outlier_files', 'outlier_files')]),
                         (modelspec, modelfit, [('session_info', 'inputspec.session_info')]),
                         (preproc, modelfit, [('outputspec.highpassed_files', 'inputspec.functional_data')])
                         ])
"""
Set up first-level workflow
---------------------------
"""
def sort_copes(files):
    """Transpose a runs-by-contrasts list of file lists into
    contrasts-by-runs order, as expected by the fixed-effects flow.

    Parameters
    ----------
    files : list of list
        files[run][contrast] -> file path.

    Returns
    -------
    list of list
        out[contrast][run] -> file path; [] for empty input.
    """
    if not files:
        # The previous implementation indexed files[0] and raised
        # IndexError on an empty input list.
        return []
    numelements = len(files[0])
    return [[run_files[i] for run_files in files]
            for i in range(numelements)]
def num_copes(files):
    """Return how many runs of copes were collected (outer-list length)."""
    return len(files)
def pickfirst(x):
    """Return the first element of *x* (used to pass a single mask file).

    PEP 8 (E731): a def is preferred over assigning a lambda to a name.
    """
    return x[0]
# Feed first-level results into the fixed-effects flow: the first run's
# mask, plus copes/varcopes regrouped per contrast via sort_copes.
level1_workflow.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst),
                                               'flameo.mask_file')]),
                         (modelfit, fixed_fx, [(('outputspec.copes', sort_copes),
                                                'inputspec.copes'),
                                               ('outputspec.dof_file',
                                                'inputspec.dof_files'),
                                               (('outputspec.varcopes',
                                                 sort_copes),
                                                'inputspec.varcopes'),
                                               (('outputspec.copes', num_copes),
                                                'l2model.num_copes'),
                                               ])
                         ])
"""
Experiment specific components
------------------------------
The nipype tutorial contains data for two subjects. Subject data
is in two subdirectories, ``s1`` and ``s2``. Each subject directory
contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And
one anatomical volume named struct.nii.
Below we set some variables to inform the ``datasource`` about the
layout of our data. We specify the location of the data, the subject
sub-directories and a dictionary that maps each run to a mnemonic (or
field) for the run type (``struct`` or ``func``). These fields become
the output fields of the ``datasource`` node in the pipeline.
In the example below, run 'f3' is of type 'func' and gets mapped to a
nifti filename through a template '%s.nii'. So 'f3' would become
'f3.nii'.
"""
# Specify the location of the data.
data_dir = os.path.abspath('data')
# Specify the subject directories
subject_list = ['s1'] #, 's3']
# Map field names to individual subject runs.
# 'func'/'struct' become the output fields of the DataGrabber below;
# each inner list is [template-arg-field, per-run file basenames].
info = dict(func=[['subject_id', ['f3','f5','f7','f10']]],
            struct=[['subject_id','struct']])

infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
                     name="infosource")

"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""

infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataSource` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.NodeWrapper` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
# Grab each subject's runs using the template and the 'info' mapping above.
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                               outfields=['func', 'struct']),
                     name = 'datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True

"""
Use the get_node function to retrieve an internal node by name. Then set the
iterables on this node to perform two different extents of smoothing.
"""

inputnode = level1_workflow.get_node('featpreproc.inputspec')
inputnode.iterables = ('fwhm', [5.,10.])

hpcutoff = 120.
TR = 3.
# NOTE(review): the preproc inputspec seems to take the highpass value in
# half-cycles of TR (hpcutoff seconds / (2*TR)) -- confirm units against
# create_featreg_preproc.
inputnode.inputs.highpass = hpcutoff/(2*TR)
"""
Setup a function that returns subject-specific information about the
experimental paradigm. This is used by the
:class:`nipype.modelgen.SpecifyModel` to create the information necessary
to generate an SPM design matrix. In this tutorial, the same paradigm was used
for every participant. Other examples of this function are available in the
`doc/examples` folder. Note: Python knowledge required here.
"""
def subjectinfo(subject_id):
    """Return per-run paradigm information for ``SpecifyModel``.

    Every subject and every one of the 4 runs uses the same block design:
    'Task-Odd' blocks start at 15s and 'Task-Even' blocks at 45s, repeating
    every 60s, each lasting 15s.

    Parameters
    ----------
    subject_id : str
        Used only for the progress printout; the paradigm itself is
        identical for all subjects.

    Returns
    -------
    list of Bunch
        One ``Bunch(conditions, onsets, durations)`` per run.
    """
    from nipype.interfaces.base import Bunch
    from copy import deepcopy
    # print() with a single argument behaves identically on Python 2 and 3;
    # the original `print "..."` statement is a SyntaxError on Python 3.
    print("Subject ID: %s\n" % str(subject_id))
    names = ['Task-Odd', 'Task-Even']
    # Onsets are run-invariant: build them once and deepcopy per run so a
    # downstream mutation of one run's Bunch cannot leak into the others.
    onsets = [range(15, 240, 60), range(45, 240, 60)]
    output = []
    for r in range(4):
        # insert(r, ...) on a sequentially grown list is just append.
        output.append(Bunch(conditions=names,
                            onsets=deepcopy(onsets),
                            durations=[[15] for s in names]))
    return output
"""
Setup the contrast structure that needs to be evaluated. This is a list of
lists. The inner list specifies the contrasts and has the following format -
[Name,Stat,[list of condition names],[weights on those conditions]. The
condition names must match the `names` listed in the `subjectinfo` function
described above.
"""
cont1 = ['Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]]
cont2 = ['Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]]
cont3 = ['Task','F', [cont1, cont2]]
contrasts = [cont1,cont2]
modelspec.inputs.input_units = 'secs'
modelspec.inputs.time_repetition = TR
modelspec.inputs.high_pass_filter_cutoff = hpcutoff
modelfit.inputs.inputspec.interscan_interval = TR
modelfit.inputs.inputspec.bases = {'dgamma':{'derivs': False}}
modelfit.inputs.inputspec.contrasts = contrasts
modelfit.inputs.inputspec.model_serial_correlations = True
modelfit.inputs.inputspec.film_threshold = 1000
level1_workflow.base_dir = os.path.abspath('./fsl/workingdir')
level1_workflow.config['execution'] = dict(crashdump_dir=os.path.abspath('./fsl/crashdumps'))
level1_workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
(infosource, modelspec, [(('subject_id', subjectinfo),
'subject_info')]),
(datasource, preproc, [('func', 'inputspec.func')]),
])
"""
Execute the pipeline
--------------------
The code discussed above sets up all the necessary data structures with
appropriate parameters and the connectivity between the processes, but does not
generate any output. To actually run the analysis on the data the
``nipype.pipeline.engine.Pipeline.Run`` function needs to be called.
"""
if __name__ == '__main__':
#level1_workflow.write_graph()
level1_workflow.run()
#level1_workflow.run(plugin='MultiProc', plugin_args={'n_procs':2})
|
yosshy/updraft | refs/heads/master | tests/test_subrecipe.py | 1 | # Copyright (c) 2013 Akira Yoshiyama <akirayoshiyama@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import paramiko
import time
import unittest
from updraft.decorator import task
from updraft.execute import execute
from updraft.recipe.base import Recipe
from updraft.command import base, ssh, local
CALLED_METHODS = None
class SampleRecipe(Recipe):
    """Recipe fanned out over three hosts, with per-host subclass dispatch."""

    hosts = ["localhost", "localhost2", "localhost3"]
    pool_size = 2

    @classmethod
    def select_subclass(cls, *args, **kwargs):
        # hosts[0] deliberately maps to no subclass (implicit None -> use cls).
        per_host = {
            cls.hosts[1]: SampleSubRecipe1,
            cls.hosts[2]: SampleSubRecipe2,
        }
        return per_host.get(kwargs["host"])

    @task
    def test(self):
        global CALLED_METHODS
        CALLED_METHODS.append("SampleRecipe:test")
class SampleSubRecipe1(SampleRecipe):
    """Variant executed on hosts[1]; records its own marker."""

    def test(self):
        # Only mutating the module-level list, so no `global` needed.
        CALLED_METHODS.append("SampleSubRecipe1:test")
class SampleSubRecipe2(SampleRecipe):
    """Variant executed on hosts[2]; records its own marker."""

    def test(self):
        # Only mutating the module-level list, so no `global` needed.
        CALLED_METHODS.append("SampleSubRecipe2:test")
class SubrecipeTest(unittest.TestCase):
    """Verify select_subclass() routes each host to the expected recipe."""

    def setUp(self):
        global CALLED_METHODS
        CALLED_METHODS = []

    def test_subrecipe(self):
        execute(SampleRecipe, "test")
        expected = [
            "SampleRecipe:test",
            "SampleSubRecipe1:test",
            "SampleSubRecipe2:test",
        ]
        self.assertEqual(CALLED_METHODS, expected)
|
pepetreshere/odoo | refs/heads/patch-2 | addons/test_mail/tests/test_mail_gateway.py | 1 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import socket
from unittest.mock import DEFAULT
from unittest.mock import patch
from odoo import exceptions
from odoo.addons.mail.tests.common import mail_new_test_user
from odoo.addons.test_mail.data import test_mail_data
from odoo.addons.test_mail.data.test_mail_data import MAIL_TEMPLATE
from odoo.addons.test_mail.models.test_mail_models import MailTestGateway
from odoo.addons.test_mail.tests.common import TestMailCommon
from odoo.tests import tagged
from odoo.tests.common import users
from odoo.tools import email_split_and_format, formataddr, mute_logger
@tagged('mail_gateway')
class TestEmailParsing(TestMailCommon):
    """Low-level parsing of raw RFC822 payloads by mail.thread.message_parse:
    body extraction priorities, attachments, bounce-header recognition."""
    def test_message_parse_body(self):
        # test pure plaintext
        plaintext = self.format(test_mail_data.MAIL_TEMPLATE_PLAINTEXT, email_from='"Sylvie Lelitre" <test.sylvie.lelitre@agrolait.com>')
        res = self.env['mail.thread'].message_parse(self.from_string(plaintext))
        self.assertIn('Please call me as soon as possible this afternoon!', res['body'])
        # test multipart / text and html -> html has priority
        multipart = self.format(MAIL_TEMPLATE, email_from='"Sylvie Lelitre" <test.sylvie.lelitre@agrolait.com>')
        res = self.env['mail.thread'].message_parse(self.from_string(multipart))
        self.assertIn('<p>Please call me as soon as possible this afternoon!</p>', res['body'])
        # test multipart / mixed: only the html alternative must survive
        res = self.env['mail.thread'].message_parse(self.from_string(test_mail_data.MAIL_MULTIPART_MIXED))
        self.assertNotIn(
            'Should create a multipart/mixed: from gmail, *bold*, with attachment', res['body'],
            'message_parse: text version should not be in body after parsing multipart/mixed')
        self.assertIn(
            '<div dir="ltr">Should create a multipart/mixed: from gmail, <b>bold</b>, with attachment.<br clear="all"><div><br></div>', res['body'],
            'message_parse: html version should be in body after parsing multipart/mixed')
        # multiple html parts must be concatenated, not replaced
        res = self.env['mail.thread'].message_parse(self.from_string(test_mail_data.MAIL_MULTIPART_MIXED_TWO))
        self.assertNotIn('First and second part', res['body'],
                         'message_parse: text version should not be in body after parsing multipart/mixed')
        self.assertIn('First part', res['body'],
                      'message_parse: first part of the html version should be in body after parsing multipart/mixed')
        self.assertIn('Second part', res['body'],
                      'message_parse: second part of the html version should be in body after parsing multipart/mixed')
        # a single binary part: empty body, content becomes an attachment
        res = self.env['mail.thread'].message_parse(self.from_string(test_mail_data.MAIL_SINGLE_BINARY))
        self.assertEqual(res['body'], '')
        self.assertEqual(res['attachments'][0][0], 'thetruth.pdf')
        # non-ascii / special characters in attachment filenames are preserved
        res = self.env['mail.thread'].message_parse(self.from_string(test_mail_data.MAIL_MULTIPART_WEIRD_FILENAME))
        self.assertEqual(res['attachments'][0][0], '62_@;,][)=.(ÇÀÉ.txt')
    def test_message_parse_eml(self):
        # Test that the parsing of mail with embedded emails as eml(msg) which generates empty attachments, can be processed.
        mail = self.format(test_mail_data.MAIL_EML_ATTACHMENT, email_from='"Sylvie Lelitre" <test.sylvie.lelitre@agrolait.com>', to='generic@test.com')
        self.env['mail.thread'].message_parse(self.from_string(mail))
    def test_message_parse_eml_bounce_headers(self):
        # Test Text/RFC822-Headers MIME content-type
        msg_id = '<861878175823148.1577183525.736005783081055-openerp-19177-account.invoice@mycompany.example.com>'
        mail = self.format(
            test_mail_data.MAIL_EML_ATTACHMENT_BOUNCE_HEADERS,
            email_from='MAILER-DAEMON@example.com (Mail Delivery System)',
            to='test_bounce+82240-account.invoice-19177@mycompany.example.com',
            # msg_id goes to the attachment's Message-Id header
            msg_id=msg_id,
        )
        res = self.env['mail.thread'].message_parse(self.from_string(mail))
        self.assertEqual(res['bounced_msg_id'], [msg_id], "Message-Id is not extracted from Text/RFC822-Headers attachment")
    def test_message_parse_plaintext(self):
        """ Incoming email in plaintext should be stored as html """
        mail = self.format(test_mail_data.MAIL_TEMPLATE_PLAINTEXT, email_from='"Sylvie Lelitre" <test.sylvie.lelitre@agrolait.com>', to='generic@test.com')
        res = self.env['mail.thread'].message_parse(self.from_string(mail))
        # plaintext is wrapped in <pre> to preserve line breaks
        self.assertIn('<pre>\nPlease call me as soon as possible this afternoon!\n\n--\nSylvie\n</pre>', res['body'])
    def test_message_parse_xhtml(self):
        # Test that the parsing of XHTML mails does not fail
        self.env['mail.thread'].message_parse(self.from_string(test_mail_data.MAIL_XHTML))
@tagged('mail_gateway')
class TestMailAlias(TestMailCommon):
    """CRUD rules of mail.alias: auto-provisioning through alias mixin fields,
    name sanitization, access restrictions and uniqueness versus the
    system-wide catchall / bounce aliases."""
    @users('employee')
    def test_alias_creation(self):
        # creating a record with alias_* fields provisions its mail.alias
        record = self.env['mail.test.container'].create({
            'name': 'Test Record',
            'alias_name': 'alias.test',
            'alias_contact': 'followers',
        })
        self.assertEqual(record.alias_id.alias_model_id, self.env['ir.model']._get('mail.test.container'))
        self.assertEqual(record.alias_id.alias_force_thread_id, record.id)
        self.assertEqual(record.alias_id.alias_parent_model_id, self.env['ir.model']._get('mail.test.container'))
        self.assertEqual(record.alias_id.alias_parent_thread_id, record.id)
        self.assertEqual(record.alias_id.alias_name, 'alias.test')
        self.assertEqual(record.alias_id.alias_contact, 'followers')
        # user-facing fields are writable and propagate to the alias
        record.write({
            'alias_name': 'better.alias.test',
            'alias_defaults': "{'default_name': 'defaults'}"
        })
        self.assertEqual(record.alias_id.alias_name, 'better.alias.test')
        self.assertEqual(record.alias_id.alias_defaults, "{'default_name': 'defaults'}")
        # technical alias fields are protected from regular (employee) users
        with self.assertRaises(exceptions.AccessError):
            record.write({
                'alias_force_thread_id': 0,
            })
        with self.assertRaises(exceptions.AccessError):
            record.write({
                'alias_model_id': self.env['ir.model']._get('mail.test.gateway').id,
            })
        # alias_defaults must be a valid literal dict
        with self.assertRaises(exceptions.ValidationError):
            record.write({'alias_defaults': "{'custom_field': brokendict"})
    def test_alias_setup(self):
        alias = self.env['mail.alias'].create({
            'alias_model_id': self.env['ir.model']._get('mail.test.container').id,
            'alias_name': 'b4r+_#_R3wl$$',
        })
        self.assertEqual(alias.alias_name, 'b4r+_-_r3wl-', 'Disallowed chars should be replaced by hyphens')
        with self.assertRaises(exceptions.ValidationError):
            alias.write({'alias_defaults': "{'custom_field': brokendict"})
    def test_alias_name_unique(self):
        alias_model_id = self.env['ir.model']._get('mail.test.gateway').id
        catchall_alias = self.env['ir.config_parameter'].sudo().get_param('mail.catchall.alias')
        bounce_alias = self.env['ir.config_parameter'].sudo().get_param('mail.bounce.alias')
        # test you cannot create aliases matching bounce / catchall
        with self.assertRaises(exceptions.UserError), self.cr.savepoint():
            self.env['mail.alias'].create({'alias_model_id': alias_model_id, 'alias_name': catchall_alias})
        with self.assertRaises(exceptions.UserError), self.cr.savepoint():
            self.env['mail.alias'].create({'alias_model_id': alias_model_id, 'alias_name': bounce_alias})
        new_mail_alias = self.env['mail.alias'].create({
            'alias_model_id': alias_model_id,
            'alias_name': 'unused.test.alias'
        })
        # test that re-using catchall and bounce alias raises UserError
        with self.assertRaises(exceptions.UserError), self.cr.savepoint():
            new_mail_alias.write({
                'alias_name': catchall_alias
            })
        with self.assertRaises(exceptions.UserError), self.cr.savepoint():
            new_mail_alias.write({
                'alias_name': bounce_alias
            })
        new_mail_alias.write({'alias_name': 'another.unused.test.alias'})
        # test that duplicating an alias should have blank name
        copy_new_mail_alias = new_mail_alias.copy()
        self.assertFalse(copy_new_mail_alias.alias_name)
        # cannot set catchall / bounce to used alias
        with self.assertRaises(exceptions.UserError), self.cr.savepoint():
            self.env['ir.config_parameter'].sudo().set_param('mail.catchall.alias', new_mail_alias.alias_name)
        with self.assertRaises(exceptions.UserError), self.cr.savepoint():
            self.env['ir.config_parameter'].sudo().set_param('mail.bounce.alias', new_mail_alias.alias_name)
@tagged('mail_gateway')
class TestMailgateway(TestMailCommon):
    @classmethod
    def setUpClass(cls):
        """Shared fixtures: the gateway target model, a seed record, a known
        partner, a catch-all 'groups' alias and an initial thread message
        that later tests reply to (cls.fake_email)."""
        super(TestMailgateway, cls).setUpClass()
        cls.test_model = cls.env['ir.model']._get('mail.test.gateway')
        cls.email_from = '"Sylvie Lelitre" <test.sylvie.lelitre@agrolait.com>'
        cls.test_record = cls.env['mail.test.gateway'].with_context(cls._test_context).create({
            'name': 'Test',
            'email_from': 'ignasse@example.com',
        }).with_context({})
        cls.partner_1 = cls.env['res.partner'].with_context(cls._test_context).create({
            'name': 'Valid Lelitre',
            'email': 'valid.lelitre@agrolait.com',
        })
        # groups@.. will cause the creation of new mail.test.gateway
        cls.alias = cls.env['mail.alias'].create({
            'alias_name': 'groups',
            'alias_user_id': False,
            'alias_model_id': cls.test_model.id,
            'alias_contact': 'everyone'})
        # Set a first message on public group to test update and hierarchy
        cls.fake_email = cls.env['mail.message'].create({
            'model': 'mail.test.gateway',
            'res_id': cls.test_record.id,
            'subject': 'Public Discussion',
            'message_type': 'email',
            'subtype_id': cls.env.ref('mail.mt_comment').id,
            'author_id': cls.partner_1.id,
            'message_id': '<123456-openerp-%s-mail.test.gateway@%s>' % (cls.test_record.id, socket.gethostname()),
        })
        cls._init_mail_gateway()
# --------------------------------------------------
# Base low-level tests
# --------------------------------------------------
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process_alias_basic(self):
        """ Test details of created message going through mailgateway """
        record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com', subject='Specific')
        # Test: one group created by mailgateway administrator as user_id is not set
        self.assertEqual(len(record), 1, 'message_process: a new mail.test should have been created')
        # get_metadata's create_uid is a (id, name) pair when set; assert on the id
        res = record.get_metadata()[0].get('create_uid') or [None]
        self.assertEqual(res[0], self.env.uid)
        # Test: one message that is the incoming email
        self.assertEqual(len(record.message_ids), 1)
        msg = record.message_ids[0]
        self.assertEqual(msg.subject, 'Specific')
        self.assertIn('Please call me as soon as possible this afternoon!', msg.body)
        self.assertEqual(msg.message_type, 'email')
        self.assertEqual(msg.subtype_id, self.env.ref('mail.mt_comment'))
    @mute_logger('odoo.addons.mail.models.mail_thread')
    def test_message_process_cid(self):
        """ Attachments referenced by Content-ID are rewritten into the body
        as /web/image URLs and kept as attachments with their names. """
        record = self.format_and_process(test_mail_data.MAIL_MULTIPART_IMAGE, self.email_from, 'groups@test.com')
        message = record.message_ids[0]
        for attachment in message.attachment_ids:
            self.assertIn('/web/image/%s' % attachment.id, message.body)
        self.assertEqual(
            set(message.attachment_ids.mapped('name')),
            set(['rosaçée.gif', 'verte!µ.gif', 'orangée.gif']))
    def test_message_process_followers(self):
        """ Incoming email: recognized author not archived and not odoobot: added as follower """
        with self.mock_mail_gateway():
            record = self.format_and_process(MAIL_TEMPLATE, self.partner_1.email_formatted, 'groups@test.com')
        self.assertEqual(record.message_ids[0].author_id, self.partner_1,
                         'message_process: recognized email -> author_id')
        self.assertEqual(record.message_ids[0].email_from, self.partner_1.email_formatted)
        self.assertEqual(record.message_follower_ids.partner_id, self.partner_1,
                         'message_process: recognized email -> added as follower')
        self.assertEqual(record.message_partner_ids, self.partner_1,
                         'message_process: recognized email -> added as follower')
        # just an email -> no follower
        with self.mock_mail_gateway():
            record2 = self.format_and_process(
                MAIL_TEMPLATE, self.email_from, 'groups@test.com',
                subject='Another Email')
        self.assertEqual(record2.message_ids[0].author_id, self.env['res.partner'])
        self.assertEqual(record2.message_ids[0].email_from, self.email_from)
        self.assertEqual(record2.message_follower_ids.partner_id, self.env['res.partner'],
                         'message_process: unrecognized email -> no follower')
        self.assertEqual(record2.message_partner_ids, self.env['res.partner'],
                         'message_process: unrecognized email -> no follower')
        # archived partner -> no follower (and not even set as author)
        self.partner_1.active = False
        self.partner_1.flush()
        with self.mock_mail_gateway():
            record3 = self.format_and_process(
                MAIL_TEMPLATE, self.partner_1.email_formatted, 'groups@test.com',
                subject='Yet Another Email')
        self.assertEqual(record3.message_ids[0].author_id, self.env['res.partner'])
        self.assertEqual(record3.message_ids[0].email_from, self.partner_1.email_formatted)
        self.assertEqual(record3.message_follower_ids.partner_id, self.env['res.partner'],
                         'message_process: unrecognized email -> no follower')
        self.assertEqual(record3.message_partner_ids, self.env['res.partner'],
                         'message_process: unrecognized email -> no follower')
        # partner_root -> never again
        odoobot = self.env.ref('base.partner_root')
        odoobot.active = True
        odoobot.email = 'odoobot@example.com'
        with self.mock_mail_gateway():
            record4 = self.format_and_process(
                MAIL_TEMPLATE, odoobot.email_formatted, 'groups@test.com',
                subject='Odoobot Automatic Answer')
        # odoobot may be the author but must never be subscribed as follower
        self.assertEqual(record4.message_ids[0].author_id, odoobot)
        self.assertEqual(record4.message_ids[0].email_from, odoobot.email_formatted)
        self.assertEqual(record4.message_follower_ids.partner_id, self.env['res.partner'],
                         'message_process: unrecognized email -> no follower')
        self.assertEqual(record4.message_partner_ids, self.env['res.partner'],
                         'message_process: unrecognized email -> no follower')
# --------------------------------------------------
# Author recognition
# --------------------------------------------------
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process_email_email_from(self):
        """ Incoming email: not recognized author: email_from, no author_id, no followers """
        record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com')
        self.assertFalse(record.message_ids[0].author_id, 'message_process: unrecognized email -> no author_id')
        # raw email_from is kept verbatim on the message
        self.assertEqual(record.message_ids[0].email_from, self.email_from)
        self.assertEqual(len(record.message_partner_ids), 0,
                         'message_process: newly create group should not have any follower')
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process_email_author(self):
        """ Incoming email: recognized author: email_from, author_id, added as follower """
        with self.mock_mail_gateway():
            record = self.format_and_process(MAIL_TEMPLATE, self.partner_1.email_formatted, 'groups@test.com', subject='Test1')
        self.assertEqual(record.message_ids[0].author_id, self.partner_1,
                         'message_process: recognized email -> author_id')
        self.assertEqual(record.message_ids[0].email_from, self.partner_1.email_formatted)
        self.assertNotSentEmail()  # No notification / bounce should be sent
        # Email recognized if partner has a formatted email
        self.partner_1.write({'email': '"Valid Lelitre" <%s>' % self.partner_1.email})
        record = self.format_and_process(MAIL_TEMPLATE, self.partner_1.email, 'groups@test.com', subject='Test2')
        self.assertEqual(record.message_ids[0].author_id, self.partner_1,
                         'message_process: recognized email -> author_id')
        self.assertEqual(record.message_ids[0].email_from, self.partner_1.email)
        self.assertNotSentEmail()  # No notification / bounce should be sent
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process_email_partner_find(self):
        """ Finding the partner based on email, based on partner / user / follower """
        self.alias.write({'alias_force_thread_id': self.test_record.id})
        from_1 = self.env['res.partner'].create({'name': 'Brice Denisse', 'email': 'from.test@example.com'})
        self.format_and_process(MAIL_TEMPLATE, from_1.email_formatted, 'groups@test.com')
        self.assertEqual(self.test_record.message_ids[0].author_id, from_1)
        self.test_record.message_unsubscribe([from_1.id])
        # same email address as from_1: resolution must now prefer the user's partner
        from_2 = mail_new_test_user(self.env, login='B', groups='base.group_user', name='User Denisse', email='from.test@example.com')
        self.format_and_process(MAIL_TEMPLATE, from_1.email_formatted, 'groups@test.com')
        self.assertEqual(self.test_record.message_ids[0].author_id, from_2.partner_id)
        self.test_record.message_unsubscribe([from_2.partner_id.id])
        # and a follower with the same email wins over both
        from_3 = self.env['res.partner'].create({'name': 'FOllower Denisse', 'email': 'from.test@example.com'})
        self.test_record.message_subscribe([from_3.id])
        self.format_and_process(MAIL_TEMPLATE, from_1.email_formatted, 'groups@test.com')
        self.assertEqual(self.test_record.message_ids[0].author_id, from_3)
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process_email_author_exclude_alias(self):
        """ Do not set alias as author to avoid including aliases in discussions """
        # partner whose email collides with an alias address
        from_1 = self.env['res.partner'].create({'name': 'Brice Denisse', 'email': 'from.test@test.com'})
        self.env['mail.alias'].create({
            'alias_name': 'from.test',
            'alias_model_id': self.env['ir.model']._get('mail.test.gateway').id
        })
        record = self.format_and_process(MAIL_TEMPLATE, from_1.email_formatted, 'groups@test.com')
        # alias-matching sender: keep email_from but set no author
        self.assertFalse(record.message_ids[0].author_id)
        self.assertEqual(record.message_ids[0].email_from, from_1.email_formatted)
# --------------------------------------------------
# Alias configuration
# --------------------------------------------------
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models.unlink', 'odoo.addons.mail.models.mail_mail')
    def test_message_process_alias_config_bounced_content(self):
        """ Custom bounced message for the alias => Received this custom message """
        self.alias.write({
            'alias_contact': 'partners',
            'alias_bounced_content': '<p>What Is Dead May Never Die</p>'
        })
        # Test: custom bounced content
        with self.mock_mail_gateway():
            record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com', subject='Should Bounce')
        self.assertFalse(record, 'message_process: should have bounced')
        self.assertSentEmail('"MAILER-DAEMON" <bounce.test@test.com>', ['whatever-2a840@postmaster.twitter.com'], body_content='<p>What Is Dead May Never Die</p>')
        self.alias.write({
            'alias_contact': 'partners',
            'alias_bounced_content': '<p></br></p>'
        })
        # Test: with "empty" bounced content (simulate view, putting always '<p></br></p>' in html field)
        with self.mock_mail_gateway():
            record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com', subject='Should Bounce')
        self.assertFalse(record, 'message_process: should have bounced')
        # Check if default (hardcoded) value is in the mail content
        self.assertSentEmail('"MAILER-DAEMON" <bounce.test@test.com>', ['whatever-2a840@postmaster.twitter.com'], body_content='The following email sent to')
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process_alias_defaults(self):
        """ Test alias defaults and inner values """
        self.alias.write({
            'alias_user_id': self.user_employee.id,
            'alias_defaults': "{'custom_field': 'defaults_custom'}"
        })
        record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com', subject='Specific')
        self.assertEqual(len(record), 1)
        # alias_user_id set -> record created as that user
        res = record.get_metadata()[0].get('create_uid') or [None]
        self.assertEqual(res[0], self.user_employee.id)
        self.assertEqual(record.name, 'Specific')
        self.assertEqual(record.custom_field, 'defaults_custom')
        # non-dict defaults ('""') must be silently ignored
        self.alias.write({'alias_defaults': '""'})
        record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com', subject='Specific2')
        self.assertEqual(len(record), 1)
        res = record.get_metadata()[0].get('create_uid') or [None]
        self.assertEqual(res[0], self.user_employee.id)
        self.assertEqual(record.name, 'Specific2')
        self.assertFalse(record.custom_field)
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process_alias_user_id(self):
        """ Test alias ownership """
        self.alias.write({'alias_user_id': self.user_employee.id})
        record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com')
        self.assertEqual(len(record), 1)
        # record must be created by the alias owner, not the gateway admin
        res = record.get_metadata()[0].get('create_uid') or [None]
        self.assertEqual(res[0], self.user_employee.id)
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process_alias_everyone(self):
        """ Incoming email: everyone: new record + message_new """
        self.alias.write({'alias_contact': 'everyone'})
        record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com', subject='Specific')
        # unknown sender is accepted: one record, one message
        self.assertEqual(len(record), 1)
        self.assertEqual(len(record.message_ids), 1)
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models.unlink', 'odoo.addons.mail.models.mail_mail')
    def test_message_process_alias_partners_bounce(self):
        """ Incoming email from an unknown partner on a Partners only alias -> bounce + test bounce email """
        self.alias.write({'alias_contact': 'partners'})
        # Test: no group created, email bounced
        with self.mock_mail_gateway():
            record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com', subject='Should Bounce')
        self.assertFalse(record)
        # bounce goes back to the Return-Path of the incoming template
        self.assertSentEmail('"MAILER-DAEMON" <bounce.test@test.com>', ['whatever-2a840@postmaster.twitter.com'], subject='Re: Should Bounce')
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models.unlink', 'odoo.addons.mail.models.mail_mail')
    def test_message_process_alias_followers_bounce(self):
        """ Incoming email from unknown partner / not follower partner on a Followers only alias -> bounce """
        self.alias.write({
            'alias_contact': 'followers',
            'alias_parent_model_id': self.env['ir.model']._get('mail.test.gateway').id,
            'alias_parent_thread_id': self.test_record.id,
        })
        # Test: unknown on followers alias -> bounce
        with self.mock_mail_gateway():
            record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com', subject='Should Bounce')
        self.assertFalse(record, 'message_process: should have bounced')
        self.assertSentEmail('"MAILER-DAEMON" <bounce.test@test.com>', ['whatever-2a840@postmaster.twitter.com'], subject='Re: Should Bounce')
        # Test: known partner but not a follower of the parent -> still bounce
        self._init_mail_mock()
        with self.mock_mail_gateway():
            record = self.format_and_process(MAIL_TEMPLATE, self.partner_1.email_formatted, 'groups@test.com', subject='Should Bounce')
        self.assertFalse(record, 'message_process: should have bounced')
        self.assertSentEmail('"MAILER-DAEMON" <bounce.test@test.com>', ['whatever-2a840@postmaster.twitter.com'], subject='Re: Should Bounce')
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process_alias_partner(self):
        """ Incoming email from a known partner on a Partners alias -> ok (+ test on alias.user_id) """
        self.alias.write({'alias_contact': 'partners'})
        record = self.format_and_process(MAIL_TEMPLATE, self.partner_1.email_formatted, 'groups@test.com')
        # Test: one group created by alias user
        self.assertEqual(len(record), 1)
        self.assertEqual(len(record.message_ids), 1)
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_process_alias_followers(self):
        """ Incoming email from a parent document follower on a Followers only alias -> ok """
        self.alias.write({
            'alias_contact': 'followers',
            'alias_parent_model_id': self.env['ir.model']._get('mail.test.gateway').id,
            'alias_parent_thread_id': self.test_record.id,
        })
        # partner_1 follows the parent document -> allowed through the alias
        self.test_record.message_subscribe(partner_ids=[self.partner_1.id])
        record = self.format_and_process(MAIL_TEMPLATE, self.partner_1.email_formatted, 'groups@test.com')
        # Test: one group created by Raoul (or Sylvie maybe, if we implement it)
        self.assertEqual(len(record), 1)
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models.unlink', 'odoo.addons.mail.models.mail_mail')
    def test_message_process_alias_update(self):
        """ Incoming email update discussion + notification email """
        self.alias.write({'alias_force_thread_id': self.test_record.id})
        self.test_record.message_subscribe(partner_ids=[self.partner_1.id])
        with self.mock_mail_gateway():
            # NOTE(review): 'groups@test.com>' carries a stray trailing '>' —
            # looks like a typo; confirm routing is still meant to hit the alias.
            record = self.format_and_process(
                MAIL_TEMPLATE, self.email_from, 'groups@test.com>',
                msg_id='<1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>', subject='Re: cats')
        # Test: no new group + new message
        self.assertFalse(record, 'message_process: alias update should not create new records')
        self.assertEqual(len(self.test_record.message_ids), 2)
        # Test: sent emails: 1 (Sylvie copy of the incoming email)
        self.assertSentEmail(self.email_from, [self.partner_1], subject='Re: cats')
# --------------------------------------------------
# Creator recognition
# --------------------------------------------------
    @mute_logger('odoo.addons.mail.models.mail_thread')
    def test_message_process_create_uid_crash(self):
        """Sender matched to a user without access on the model: the record is
        still created and attributed to that user.
        NOTE(review): presumably the gateway creates in a sudo-ed environment,
        which is why the patched AccessError does not abort — confirm."""
        def _employee_crash(*args, **kwargs):
            """ If employee is test employee, consider he has no access on document """
            recordset = args[0]
            if recordset.env.uid == self.user_employee.id and not recordset.env.su:
                if kwargs.get('raise_exception', True):
                    raise exceptions.AccessError('Hop hop hop Ernest, please step back.')
                return False
            return DEFAULT
        with patch.object(MailTestGateway, 'check_access_rights', autospec=True, side_effect=_employee_crash):
            record = self.format_and_process(MAIL_TEMPLATE, self.user_employee.email_formatted, 'groups@test.com', subject='NoEmployeeAllowed')
        self.assertEqual(record.create_uid, self.user_employee)
        self.assertEqual(record.message_ids[0].subject, 'NoEmployeeAllowed')
        self.assertEqual(record.message_ids[0].create_uid, self.user_employee)
        self.assertEqual(record.message_ids[0].author_id, self.user_employee.partner_id)
    @mute_logger('odoo.addons.mail.models.mail_thread')
    def test_message_process_create_uid_email(self):
        """Sender email matched to a user: creator/author attribution holds for
        formatted, renamed and bare forms of the same address."""
        record = self.format_and_process(MAIL_TEMPLATE, self.user_employee.email_formatted, 'groups@test.com', subject='Email Found')
        self.assertEqual(record.create_uid, self.user_employee)
        self.assertEqual(record.message_ids[0].subject, 'Email Found')
        self.assertEqual(record.message_ids[0].create_uid, self.user_employee)
        self.assertEqual(record.message_ids[0].author_id, self.user_employee.partner_id)
        # different display name, same address -> still matched
        record = self.format_and_process(MAIL_TEMPLATE, 'Another name <%s>' % self.user_employee.email, 'groups@test.com', subject='Email OtherName')
        self.assertEqual(record.create_uid, self.user_employee)
        self.assertEqual(record.message_ids[0].subject, 'Email OtherName')
        self.assertEqual(record.message_ids[0].create_uid, self.user_employee)
        self.assertEqual(record.message_ids[0].author_id, self.user_employee.partner_id)
        # bare normalized address -> still matched
        record = self.format_and_process(MAIL_TEMPLATE, self.user_employee.email_normalized, 'groups@test.com', subject='Email SimpleEmail')
        self.assertEqual(record.create_uid, self.user_employee)
        self.assertEqual(record.message_ids[0].subject, 'Email SimpleEmail')
        self.assertEqual(record.message_ids[0].create_uid, self.user_employee)
        self.assertEqual(record.message_ids[0].author_id, self.user_employee.partner_id)
    @mute_logger('odoo.addons.mail.models.mail_thread')
    def test_message_process_create_uid_email_follower(self):
        """Two users share an email: a follower of the alias parent document
        wins; without a follower, the first user found wins."""
        self.alias.write({
            'alias_parent_model_id': self.test_model.id,
            'alias_parent_thread_id': self.test_record.id,
        })
        # second user with the SAME email as user_employee
        follower_user = mail_new_test_user(self.env, login='better', groups='base.group_user', name='Ernest Follower', email=self.user_employee.email)
        self.test_record.message_subscribe(follower_user.partner_id.ids)
        record = self.format_and_process(MAIL_TEMPLATE, self.user_employee.email_formatted, 'groups@test.com', subject='FollowerWinner')
        self.assertEqual(record.create_uid, follower_user)
        self.assertEqual(record.message_ids[0].subject, 'FollowerWinner')
        self.assertEqual(record.message_ids[0].create_uid, follower_user)
        self.assertEqual(record.message_ids[0].author_id, follower_user.partner_id)
        # name order win
        self.test_record.message_unsubscribe(follower_user.partner_id.ids)
        self.test_record.flush()
        record = self.format_and_process(MAIL_TEMPLATE, self.user_employee.email_formatted, 'groups@test.com', subject='FirstFoundWinner')
        self.assertEqual(record.create_uid, self.user_employee)
        self.assertEqual(record.message_ids[0].subject, 'FirstFoundWinner')
        self.assertEqual(record.message_ids[0].create_uid, self.user_employee)
        self.assertEqual(record.message_ids[0].author_id, self.user_employee.partner_id)
# --------------------------------------------------
# Alias routing management
# --------------------------------------------------
    @mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
    def test_message_route_alias_no_domain(self):
        """ Incoming email: write to alias even if no domain set: considered as valid alias """
        # blank out the configured catchall domain
        self.env['ir.config_parameter'].set_param('mail.catchall.domain', '')
        new_record = self.format_and_process(MAIL_TEMPLATE, self.partner_1.email_formatted, 'groups@another.domain.com', subject='Test Subject')
        # Test: one group created
        self.assertEqual(len(new_record), 1, 'message_process: a new mail.test.simple should have been created')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_route_alias_forward_bypass_reply_first(self):
    """ Incoming email: write to two "new thread" alias, one as a reply, one being another model -> consider as a forward """
    self.assertEqual(len(self.test_record.message_ids), 1)
    # test@.. will cause the creation of new mail.test
    new_alias_2 = self.env['mail.alias'].create({
        'alias_name': 'test',
        'alias_user_id': False,
        'alias_model_id': self.env['ir.model']._get('mail.test.container').id,
        'alias_contact': 'everyone',
    })
    # In-Reply-To points at the existing thread, but To lists the other-model
    # alias first: forward detection must win over the reply heuristic.
    new_rec = self.format_and_process(
        MAIL_TEMPLATE, self.partner_1.email_formatted,
        '%s@%s, %s@%s' % (new_alias_2.alias_name, self.alias_domain, self.alias.alias_name, self.alias_domain),
        subject='Test Subject',
        extra='In-Reply-To:\r\n\t%s\n' % self.fake_email.message_id,
        target_model=new_alias_2.alias_model_id.model
    )
    # Forward created a new record in mail.test
    self.assertEqual(len(new_rec), 1, 'message_process: a new mail.test should have been created')
    self.assertEqual(new_rec._name, new_alias_2.alias_model_id.model)
    # No new post on test_record, no new record in mail.test.simple either
    self.assertEqual(len(self.test_record.message_ids), 1, 'message_process: should not post on replied record as forward should bypass it')
    new_simple = self.env['mail.test.simple'].search([('name', '=', 'Test Subject')])
    self.assertEqual(len(new_simple), 0, 'message_process: a new mail.test should not have been created')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_route_alias_forward_bypass_reply_second(self):
    """ Incoming email: write to two "new thread" alias, one as a reply, one being another model -> consider as a forward """
    self.assertEqual(len(self.test_record.message_ids), 1)
    # test@.. will cause the creation of new mail.test
    new_alias_2 = self.env['mail.alias'].create({
        'alias_name': 'test',
        'alias_user_id': False,
        'alias_model_id': self.env['ir.model']._get('mail.test.container').id,
        'alias_contact': 'everyone',
    })
    # Same scenario as *_reply_first, but the other-model alias comes SECOND
    # in the recipient list: ordering must not matter for forward detection.
    new_rec = self.format_and_process(
        MAIL_TEMPLATE, self.partner_1.email_formatted,
        '%s@%s, %s@%s' % (self.alias.alias_name, self.alias_domain, new_alias_2.alias_name, self.alias_domain),
        subject='Test Subject',
        extra='In-Reply-To:\r\n\t%s\n' % self.fake_email.message_id,
        target_model=new_alias_2.alias_model_id.model
    )
    # Forward created a new record in mail.test
    self.assertEqual(len(new_rec), 1, 'message_process: a new mail.test should have been created')
    self.assertEqual(new_rec._name, new_alias_2.alias_model_id.model)
    # No new post on test_record, no new record in mail.test.simple either
    self.assertEqual(len(self.test_record.message_ids), 1, 'message_process: should not post on replied record as forward should bypass it')
    new_simple = self.env['mail.test.simple'].search([('name', '=', 'Test Subject')])
    self.assertEqual(len(new_simple), 0, 'message_process: a new mail.test should not have been created')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_route_alias_forward_bypass_update_alias(self):
    """ Incoming email: write to one "update", one "new thread" alias, one as a reply, one being another model -> consider as a forward """
    self.assertEqual(len(self.test_record.message_ids), 1)
    # Turn the main alias into an "update existing thread" alias.
    self.alias.write({
        'alias_force_thread_id': self.test_record.id,
    })
    # test@.. will cause the creation of new mail.test
    new_alias_2 = self.env['mail.alias'].create({
        'alias_name': 'test',
        'alias_user_id': False,
        'alias_model_id': self.env['ir.model']._get('mail.test.container').id,
        'alias_contact': 'everyone',
    })
    new_rec = self.format_and_process(
        MAIL_TEMPLATE, self.partner_1.email_formatted,
        '%s@%s, %s@%s' % (new_alias_2.alias_name, self.alias_domain, self.alias.alias_name, self.alias_domain),
        subject='Test Subject',
        extra='In-Reply-To:\r\n\t%s\n' % self.fake_email.message_id,
        target_model=new_alias_2.alias_model_id.model
    )
    # Forward created a new record in mail.test
    self.assertEqual(len(new_rec), 1, 'message_process: a new mail.test should have been created')
    self.assertEqual(new_rec._name, new_alias_2.alias_model_id.model)
    # No new post on test_record, no new record in mail.test.simple either
    self.assertEqual(len(self.test_record.message_ids), 1, 'message_process: should not post on replied record as forward should bypass it')
    # No new record on first alias model
    new_simple = self.env['mail.test.gateway'].search([('name', '=', 'Test Subject')])
    self.assertEqual(len(new_simple), 0, 'message_process: a new mail.test should not have been created')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_route_alias_multiple_new(self):
    """ Incoming email: write to two aliases creating records: both should be activated """
    # test@.. will cause the creation of new mail.test
    new_alias_2 = self.env['mail.alias'].create({
        'alias_name': 'test',
        'alias_user_id': False,
        'alias_model_id': self.env['ir.model']._get('mail.test.container').id,
        'alias_contact': 'everyone',
    })
    # No In-Reply-To here: both aliases are plain "create" targets, so each
    # one should produce a record in its own model.
    new_rec = self.format_and_process(
        MAIL_TEMPLATE, self.partner_1.email_formatted,
        '%s@%s, %s@%s' % (self.alias.alias_name, self.alias_domain, new_alias_2.alias_name, self.alias_domain),
        subject='Test Subject',
        target_model=new_alias_2.alias_model_id.model
    )
    # New record in both mail.test (new_alias_2) and mail.test.simple (self.alias)
    self.assertEqual(len(new_rec), 1, 'message_process: a new mail.test should have been created')
    self.assertEqual(new_rec._name, new_alias_2.alias_model_id.model)
    new_simple = self.env['mail.test.gateway'].search([('name', '=', 'Test Subject')])
    self.assertEqual(len(new_simple), 1, 'message_process: a new mail.test should have been created')
# --------------------------------------------------
# Email Management
# --------------------------------------------------
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_route_bounce(self):
    """Incoming email: bounce using bounce alias: no record creation """
    with self.mock_mail_gateway():
        # Address shaped as bounce+<mail_id>-<model>-<res_id>@domain.
        new_recs = self.format_and_process(
            MAIL_TEMPLATE, self.partner_1.email_formatted,
            '%s+%s-%s-%s@%s' % (
                self.alias_bounce, self.fake_email.id,
                self.fake_email.model, self.fake_email.res_id,
                self.alias_domain
            ),
            subject='Should bounce',
        )
    # The bounce is swallowed: no record created, no answer sent back.
    self.assertFalse(new_recs)
    self.assertNotSentEmail()
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_route_bounce_other_recipients(self):
    """Incoming email: bounce processing: bounce should be computed even if not first recipient """
    with self.mock_mail_gateway():
        # Valid alias first, bounce address second: bounce handling must
        # still trigger and short-circuit the alias routing.
        new_recs = self.format_and_process(
            MAIL_TEMPLATE, self.partner_1.email_formatted,
            '%s@%s, %s+%s-%s-%s@%s' % (
                self.alias.alias_name, self.alias_domain,
                self.alias_bounce, self.fake_email.id,
                self.fake_email.model, self.fake_email.res_id,
                self.alias_domain
            ),
            subject='Should bounce',
        )
    self.assertFalse(new_recs)
    self.assertNotSentEmail()
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.addons.mail.models.mail_mail', 'odoo.models.unlink')
def test_message_route_write_to_catchall(self):
    """ Writing directly to catchall should bounce """
    # Test: no group created, email bounced
    with self.mock_mail_gateway():
        record = self.format_and_process(
            MAIL_TEMPLATE, self.partner_1.email_formatted,
            '"My Super Catchall" <%s@%s>' % (self.alias_catchall, self.alias_domain),
            subject='Should Bounce')
    self.assertFalse(record)
    # Bounce notification comes from the MAILER-DAEMON bounce address;
    # recipient presumably taken from MAIL_TEMPLATE's Return-Path -- confirm.
    self.assertSentEmail('"MAILER-DAEMON" <bounce.test@test.com>', ['whatever-2a840@postmaster.twitter.com'], subject='Re: Should Bounce')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_route_write_to_catchall_other_recipients_first(self):
    """ Writing directly to catchall and a valid alias should take alias """
    # Test: no group created, email bounced
    with self.mock_mail_gateway():
        # Catchall listed first, valid alias second.
        record = self.format_and_process(
            MAIL_TEMPLATE, self.partner_1.email_formatted,
            '%s@%s, %s@%s' % (self.alias_catchall, self.alias_domain, self.alias.alias_name, self.alias_domain),
            subject='Catchall Not Blocking'
        )
    # Test: one group created
    self.assertEqual(len(record), 1, 'message_process: a new mail.test should have been created')
    # No bounce email
    self.assertNotSentEmail()
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_route_write_to_catchall_other_recipients_second(self):
    """ Writing directly to catchall and a valid alias should take alias """
    # Test: no group created, email bounced
    with self.mock_mail_gateway():
        # Valid alias listed first, catchall second: order must not matter.
        record = self.format_and_process(
            MAIL_TEMPLATE, self.partner_1.email_formatted,
            '%s@%s, %s@%s' % (self.alias.alias_name, self.alias_domain, self.alias_catchall, self.alias_domain),
            subject='Catchall Not Blocking'
        )
    # Test: one group created
    self.assertEqual(len(record), 1, 'message_process: a new mail.test should have been created')
    # No bounce email
    self.assertNotSentEmail()
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_process_bounce_alias(self):
    """ Writing to bounce alias is considered as a bounce even if not multipart/report bounce structure """
    self.assertEqual(self.partner_1.message_bounce, 0)
    self.assertEqual(self.test_record.message_bounce, 0)
    # Forge a bounce address referencing an arbitrary mail id.
    bounced_mail_id = 4442
    bounce_email_to = '%s+%s-%s-%s@%s' % ('bounce.test', bounced_mail_id, self.test_record._name, self.test_record.id, 'test.com')
    record = self.format_and_process(MAIL_TEMPLATE, self.partner_1.email_formatted, bounce_email_to, subject='Undelivered Mail Returned to Sender')
    self.assertFalse(record)
    # No information found in bounce email -> not possible to do anything except avoiding email
    self.assertEqual(self.partner_1.message_bounce, 0)
    self.assertEqual(self.test_record.message_bounce, 0)
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_process_bounce_from_mailer_demon(self):
    """Emails sent by MAILER-DAEMON are treated as bounces: no record created."""
    # Bounce counters start clean.
    for target in (self.partner_1, self.test_record):
        self.assertEqual(target.message_bounce, 0)
    created = self.format_and_process(
        MAIL_TEMPLATE, 'MAILER-DAEMON@example.com', 'groups@test.com',
        subject='Undelivered Mail Returned to Sender')
    self.assertFalse(created)
    # The bounce carries no exploitable reference, so counters are untouched:
    # the only possible action is to drop the email.
    for target in (self.partner_1, self.test_record):
        self.assertEqual(target.message_bounce, 0)
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_process_bounce_multipart_alias(self):
    """ Multipart/report bounce correctly make related partner bounce """
    self.assertEqual(self.partner_1.message_bounce, 0)
    self.assertEqual(self.test_record.message_bounce, 0)
    bounced_mail_id = 4442
    bounce_email_to = '%s+%s-%s-%s@%s' % ('bounce.test', bounced_mail_id, self.test_record._name, self.test_record.id, 'test.com')
    # MAIL_BOUNCE is a multipart/report payload; sender is partner_1 so the
    # partner counter is incremented.
    record = self.format_and_process(test_mail_data.MAIL_BOUNCE, self.partner_1.email_formatted, bounce_email_to, subject='Undelivered Mail Returned to Sender')
    self.assertFalse(record)
    # Missing in reply to message_id -> cannot find original record
    self.assertEqual(self.partner_1.message_bounce, 1)
    self.assertEqual(self.test_record.message_bounce, 0)
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_process_bounce_multipart_alias_reply(self):
    """ Multipart/report bounce correctly make related partner and record found in bounce email bounce """
    self.assertEqual(self.partner_1.message_bounce, 0)
    self.assertEqual(self.test_record.message_bounce, 0)
    bounced_mail_id = 4442
    bounce_email_to = '%s+%s-%s-%s@%s' % ('bounce.test', bounced_mail_id, self.test_record._name, self.test_record.id, 'test.com')
    # With the original message_id as reference, both the partner AND the
    # record linked to fake_email get their bounce counter incremented.
    extra = self.fake_email.message_id
    record = self.format_and_process(test_mail_data.MAIL_BOUNCE, self.partner_1.email_formatted, bounce_email_to, subject='Undelivered Mail Returned to Sender', extra=extra)
    self.assertFalse(record)
    self.assertEqual(self.partner_1.message_bounce, 1)
    self.assertEqual(self.test_record.message_bounce, 1)
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_process_bounce_multipart_alias_whatever_from(self):
    """ Multipart/report bounce correctly make related record found in bounce email bounce """
    self.assertEqual(self.partner_1.message_bounce, 0)
    self.assertEqual(self.test_record.message_bounce, 0)
    bounced_mail_id = 4442
    bounce_email_to = '%s+%s-%s-%s@%s' % ('bounce.test', bounced_mail_id, self.test_record._name, self.test_record.id, 'test.com')
    extra = self.fake_email.message_id
    # Unknown sender: only the record referenced by fake_email bounces,
    # partner_1's counter stays at zero.
    record = self.format_and_process(test_mail_data.MAIL_BOUNCE, 'Whatever <what@ever.com>', bounce_email_to, subject='Undelivered Mail Returned to Sender', extra=extra)
    self.assertFalse(record)
    self.assertEqual(self.partner_1.message_bounce, 0)
    self.assertEqual(self.test_record.message_bounce, 1)
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_process_bounce_multipart_whatever_to_and_from(self):
    """ Multipart/report bounce correctly make related record found in bounce email bounce """
    self.assertEqual(self.partner_1.message_bounce, 0)
    self.assertEqual(self.test_record.message_bounce, 0)
    extra = self.fake_email.message_id
    # Neither sender nor recipient is a bounce address: the multipart/report
    # structure plus the referenced message_id is enough to flag the record.
    record = self.format_and_process(test_mail_data.MAIL_BOUNCE, 'Whatever <what@ever.com>', 'groups@test.com', subject='Undelivered Mail Returned to Sender', extra=extra)
    self.assertFalse(record)
    self.assertEqual(self.partner_1.message_bounce, 0)
    self.assertEqual(self.test_record.message_bounce, 1)
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_process_bounce_records_channel(self):
    """ Test blacklist allow to multi-bounce and auto update of mail.channel """
    # Two extra records sharing partner_1's email (one with different case).
    self.other_record = self.env['mail.test.gateway'].create({
        'email_from': 'Another name <%s>' % self.partner_1.email
    })
    self.yet_other_record = self.env['mail.test.gateway'].create({
        'email_from': 'Yet Another name <%s>' % self.partner_1.email.upper()
    })
    # Channel with partner_1 as member; fake_email is re-targeted to it so
    # repeated bounces can trigger the membership cleanup.
    self.test_channel = self.env['mail.channel'].create({
        'name': 'Test',
        'channel_last_seen_partner_ids': [(0, 0, {'partner_id': self.partner_1.id})],
    })
    self.fake_email.write({
        'model': 'mail.channel',
        'res_id': self.test_channel.id,
    })
    self.assertIn(self.partner_1, self.test_channel.channel_partner_ids)
    self.assertEqual(self.partner_1.message_bounce, 0)
    self.assertEqual(self.other_record.message_bounce, 0)
    self.assertEqual(self.yet_other_record.message_bounce, 0)
    extra = self.fake_email.message_id
    # Ten successive bounces for the same email address.
    for i in range(10):
        record = self.format_and_process(test_mail_data.MAIL_BOUNCE, 'A third name <%s>' % self.partner_1.email, 'groups@test.com', subject='Undelivered Mail Returned to Sender', extra=extra)
        self.assertFalse(record)
    # Counter propagates to every record matching the email (case-insensitive),
    # but not to test_record which does not share the address.
    self.assertEqual(self.partner_1.message_bounce, 10)
    self.assertEqual(self.test_record.message_bounce, 0)
    self.assertEqual(self.other_record.message_bounce, 10)
    self.assertEqual(self.yet_other_record.message_bounce, 10)
    # Repeated bounces removed the partner from the channel.
    self.assertNotIn(self.partner_1, self.test_channel.channel_partner_ids)
# --------------------------------------------------
# Thread formation
# --------------------------------------------------
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_in_reply_to(self):
    """ Incoming email using in-rely-to should go into the right destination even with a wrong destination """
    init_msg_count = len(self.test_record.message_ids)
    # NOTE(review): the To address carries a stray '>' ('erroneous@test.com>');
    # routing must succeed anyway thanks to the In-Reply-To header.
    self.format_and_process(
        MAIL_TEMPLATE, 'valid.other@gmail.com', 'erroneous@test.com>',
        subject='Re: news', extra='In-Reply-To:\r\n\t%s\n' % self.fake_email.message_id)
    self.assertEqual(len(self.test_record.message_ids), init_msg_count + 1)
    # New message is threaded under the referenced fake_email.
    self.assertEqual(self.fake_email.child_ids, self.test_record.message_ids[0])
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_references(self):
    """ Incoming email using references should go into the right destination even with a wrong destination """
    init_msg_count = len(self.test_record.message_ids)
    # Only the last entry of References matches a known message_id.
    self.format_and_process(
        MAIL_TEMPLATE, self.email_from, 'erroneous@test.com',
        extra='References: <2233@a.com>\r\n\t<3edss_dsa@b.com> %s' % self.fake_email.message_id)
    self.assertEqual(len(self.test_record.message_ids), init_msg_count + 1)
    self.assertEqual(self.fake_email.child_ids, self.test_record.message_ids[0])
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_process_references_external(self):
    """ Incoming email being a reply to an external email processed by odoo should update thread accordingly """
    # Replace the stored message_id with an externally-generated-looking one.
    new_message_id = '<ThisIsTooMuchFake.MonsterEmail.789@agrolait.com>'
    self.fake_email.write({
        'message_id': new_message_id
    })
    init_msg_count = len(self.test_record.message_ids)
    self.format_and_process(
        MAIL_TEMPLATE, self.email_from, 'erroneous@test.com',
        extra='References: <2233@a.com>\r\n\t<3edss_dsa@b.com> %s' % self.fake_email.message_id)
    self.assertEqual(len(self.test_record.message_ids), init_msg_count + 1)
    self.assertEqual(self.fake_email.child_ids, self.test_record.message_ids[0])
@mute_logger('odoo.addons.mail.models.mail_thread')
def test_message_process_references_forward(self):
    """ Incoming email using references but with alias forward should not go into references destination """
    # Alias targeting a different model than the referenced thread.
    self.env['mail.alias'].create({
        'alias_name': 'test.alias',
        'alias_user_id': False,
        'alias_model_id': self.env['ir.model']._get('mail.test.container').id,
        'alias_contact': 'everyone',
    })
    init_msg_count = len(self.test_record.message_ids)
    res_test = self.format_and_process(
        MAIL_TEMPLATE, self.email_from, 'test.alias@test.com',
        subject='My Dear Forward', extra='References: <2233@a.com>\r\n\t<3edss_dsa@b.com> %s' % self.fake_email.message_id,
        target_model='mail.test.container')
    # Forward to another model wins: nothing posted on the referenced thread.
    self.assertEqual(len(self.test_record.message_ids), init_msg_count)
    self.assertEqual(len(self.fake_email.child_ids), 0)
    self.assertEqual(res_test.name, 'My Dear Forward')
    self.assertEqual(len(res_test.message_ids), 1)
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_references_forward_same_model(self):
    """ Incoming email using references but with alias forward on same model should be considered as a reply """
    # Alias targets the SAME model as the referenced thread.
    self.env['mail.alias'].create({
        'alias_name': 'test.alias',
        'alias_user_id': False,
        'alias_model_id': self.env['ir.model']._get('mail.test.gateway').id,
        'alias_contact': 'everyone',
    })
    init_msg_count = len(self.test_record.message_ids)
    res_test = self.format_and_process(
        MAIL_TEMPLATE, self.email_from, 'test.alias@test.com',
        subject='My Dear Forward', extra='References: <2233@a.com>\r\n\t<3edss_dsa@b.com> %s' % self.fake_email.message_id,
        target_model='mail.test.container')
    # Same-model alias behaves as a reply: thread updated, no new record.
    self.assertEqual(len(self.test_record.message_ids), init_msg_count + 1)
    self.assertEqual(len(self.fake_email.child_ids), 1)
    self.assertFalse(res_test)
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_references_forward_cc(self):
    """ Incoming email using references but with alias forward in CC should be considered as a reply (To > Cc) """
    self.env['mail.alias'].create({
        'alias_name': 'test.alias',
        'alias_user_id': False,
        'alias_model_id': self.env['ir.model']._get('mail.test.container').id,
        'alias_contact': 'everyone',
    })
    init_msg_count = len(self.test_record.message_ids)
    # Forward alias is only in Cc; References threading takes precedence.
    res_test = self.format_and_process(
        MAIL_TEMPLATE, self.email_from, 'catchall.test@test.com', cc='test.alias@test.com',
        subject='My Dear Forward', extra='References: <2233@a.com>\r\n\t<3edss_dsa@b.com> %s' % self.fake_email.message_id,
        target_model='mail.test.container')
    self.assertEqual(len(self.test_record.message_ids), init_msg_count + 1)
    self.assertEqual(len(self.fake_email.child_ids), 1)
    self.assertFalse(res_test)
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models', 'odoo.addons.mail.models.mail_mail')
def test_message_process_reply_to_new_thread(self):
    """ Test replies not being considered as replies but use destination information instead (aka, mass post + specific reply to using aliases) """
    first_record = self.env['mail.test.simple'].with_user(self.user_employee).create({'name': 'Replies to Record'})
    # Message with auto-threading: reply_to points at the catchall.
    record_msg = first_record.message_post(
        subject='Discussion',
        no_auto_thread=False,
        subtype_xmlid='mail.mt_comment',
    )
    self.assertEqual(record_msg.reply_to, formataddr(('%s %s' % (self.user_employee.company_id.name, first_record.name), '%s@%s' % ('catchall.test', 'test.com'))))
    # Message without auto-threading: reply_to forced to an alias address.
    mail_msg = first_record.message_post(
        subject='Replies to Record',
        reply_to='groups@test.com',
        no_auto_thread=True,
        subtype_xmlid='mail.mt_comment',
    )
    self.assertEqual(mail_msg.reply_to, 'groups@test.com')
    # reply to mail but should be considered as a new mail for alias
    msgID = '<this.is.duplicate.test@iron.sky>'
    res_test = self.format_and_process(
        MAIL_TEMPLATE, self.email_from, record_msg.reply_to, cc='',
        subject='Re: Replies to Record', extra='In-Reply-To: %s' % record_msg.message_id,
        msg_id=msgID, target_model='mail.test.simple')
    incoming_msg = self.env['mail.message'].search([('message_id', '=', msgID)])
    # Catchall reply_to: routed as a reply on the original record.
    self.assertFalse(res_test)
    self.assertEqual(incoming_msg.model, 'mail.test.simple')
    self.assertEqual(incoming_msg.parent_id, first_record.message_ids[-1])
    self.assertTrue(incoming_msg.res_id == first_record.id)
    # reply to mail but should be considered as a new mail for alias
    msgID = '<this.is.for.testing@iron.sky>'
    res_test = self.format_and_process(
        MAIL_TEMPLATE, self.email_from, mail_msg.reply_to, cc='',
        subject='Re: Replies to Record', extra='In-Reply-To: %s' % mail_msg.message_id,
        msg_id=msgID, target_model='mail.test.gateway')
    incoming_msg = self.env['mail.message'].search([('message_id', '=', msgID)])
    # Alias reply_to with no_auto_thread: creates a brand new gateway record.
    self.assertEqual(len(res_test), 1)
    self.assertEqual(res_test.name, 'Re: Replies to Record')
    self.assertEqual(incoming_msg.model, 'mail.test.gateway')
    self.assertFalse(incoming_msg.parent_id)
    self.assertTrue(incoming_msg.res_id == res_test.id)
# --------------------------------------------------
# Thread formation: mail gateway corner cases
# --------------------------------------------------
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_extra_model_res_id(self):
    """ Incoming email with ref holding model / res_id but that does not match any message in the thread: must raise since OpenERP saas-3 """
    self.assertRaises(ValueError,
                      self.format_and_process, MAIL_TEMPLATE,
                      self.partner_1.email_formatted, 'noone@test.com', subject='spam',
                      extra='In-Reply-To: <12321321-openerp-%d-mail.test.gateway@%s>' % (self.test_record.id, socket.gethostname()))
    # when 6.1 messages are present, compat mode is available
    # Odoo 10 update: compat mode has been removed and should not work anymore
    self.fake_email.write({'message_id': False})
    # Do: compat mode accepts partial-matching emails
    self.assertRaises(
        ValueError,
        self.format_and_process, MAIL_TEMPLATE,
        self.partner_1.email_formatted, 'noone@test.com>', subject='spam',
        extra='In-Reply-To: <12321321-openerp-%d-mail.test.gateway@%s>' % (self.test_record.id, socket.gethostname()))
    # Test created messages
    self.assertEqual(len(self.test_record.message_ids), 1)
    self.assertEqual(len(self.test_record.message_ids[0].child_ids), 0)
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_duplicate(self):
    """ Duplicate emails (same message_id) are not processed """
    self.alias.write({'alias_force_thread_id': self.test_record.id,})
    # Post a base message
    record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'groups@test.com', subject='Re: super cats', msg_id='<123?456.diff1@agrolait.com>')
    self.assertFalse(record)
    self.assertEqual(len(self.test_record.message_ids), 2)
    # Do: due to some issue, same email goes back into the mailgateway
    record = self.format_and_process(
        MAIL_TEMPLATE, self.email_from, 'groups@test.com', subject='Re: news',
        msg_id='<123?456.diff1@agrolait.com>', extra='In-Reply-To: <1198923581.41972151344608186799.JavaMail.diff1@agrolait.com>\n')
    # Second pass is silently ignored: no new message on the thread.
    self.assertFalse(record)
    self.assertEqual(len(self.test_record.message_ids), 2)
    # Test: message_id is still unique
    no_of_msg = self.env['mail.message'].search_count([('message_id', 'ilike', '<123?456.diff1@agrolait.com>')])
    self.assertEqual(no_of_msg, 1,
                     'message_process: message with already existing message_id should not have been duplicated')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_crash_wrong_model(self):
    """Routing to a model that cannot accept incoming emails must raise."""
    # res.country has no mail thread support, so processing must fail.
    with self.assertRaises(ValueError):
        self.format_and_process(
            MAIL_TEMPLATE, self.email_from, 'noone@test.com',
            subject='spam', extra='', model='res.country')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_crash_no_data(self):
    """Without a fallback model and without any matching alias the gateway must raise."""
    with self.assertRaises(ValueError):
        self.format_and_process(
            MAIL_TEMPLATE, self.email_from, 'noone@test.com',
            subject='spam', extra='')
@mute_logger('odoo.addons.mail.models.mail_thread', 'odoo.models')
def test_message_process_fallback(self):
    """When no alias matches, the explicitly-given fallback model receives the email."""
    new_rec = self.format_and_process(
        MAIL_TEMPLATE, self.email_from, 'noone@test.com',
        subject='Spammy', extra='', model='mail.test.gateway')
    # Exactly one record of the fallback model, named after the subject.
    self.assertEqual(len(new_rec), 1)
    self.assertEqual(new_rec.name, 'Spammy')
    self.assertEqual(new_rec._name, 'mail.test.gateway')
class TestMailThreadCC(TestMailCommon):
    """Check propagation of Cc recipients onto records created or updated
    through the mail gateway (mail.test.cc model)."""

    @classmethod
    def setUpClass(cls):
        super(TestMailThreadCC, cls).setUpClass()
        cls.email_from = 'Sylvie Lelitre <test.sylvie.lelitre@agrolait.com>'
        # Alias routing incoming emails to the Cc-aware test model.
        cls.alias = cls.env['mail.alias'].create({
            'alias_name': 'cc_record',
            'alias_user_id': False,
            'alias_model_id': cls.env['ir.model']._get('mail.test.cc').id,
            'alias_contact': 'everyone'})
        cls._init_mail_gateway()

    @mute_logger('odoo.addons.mail.models.mail_thread')
    def test_message_cc_new(self):
        # Cc addresses of an incoming email are stored on the new record.
        record = self.format_and_process(MAIL_TEMPLATE, self.email_from, 'cc_record@test.com',
                                         cc='cc1@example.com, cc2@example.com', target_model='mail.test.cc')
        cc = email_split_and_format(record.email_cc)
        self.assertEqual(sorted(cc), ['cc1@example.com', 'cc2@example.com'])

    @mute_logger('odoo.addons.mail.models.mail_thread')
    def test_message_cc_update_with_old(self):
        # Existing Cc values are merged with the incoming ones, deduplicated
        # by email address (cc2 kept once, original formatting preserved).
        record = self.env['mail.test.cc'].create({'email_cc': 'cc1 <cc1@example.com>, cc2@example.com'})
        self.alias.write({'alias_force_thread_id': record.id})
        self.format_and_process(MAIL_TEMPLATE, self.email_from, 'cc_record@test.com',
                                cc='cc2 <cc2@example.com>, cc3@example.com', target_model='mail.test.cc')
        cc = email_split_and_format(record.email_cc)
        self.assertEqual(sorted(cc), ['"cc1" <cc1@example.com>', 'cc2@example.com', 'cc3@example.com'], 'new cc should have been added on record (unique)')

    @mute_logger('odoo.addons.mail.models.mail_thread')
    def test_message_cc_update_no_old(self):
        # Record without any prior Cc: incoming Cc values are simply set.
        record = self.env['mail.test.cc'].create({})
        self.alias.write({'alias_force_thread_id': record.id})
        self.format_and_process(MAIL_TEMPLATE, self.email_from, 'cc_record@test.com',
                                cc='cc2 <cc2@example.com>, cc3@example.com', target_model='mail.test.cc')
        cc = email_split_and_format(record.email_cc)
        self.assertEqual(sorted(cc), ['"cc2" <cc2@example.com>', 'cc3@example.com'], 'new cc should have been added on record (unique)')
|
benreynwar/fpga-sdrlib | refs/heads/master | python/fpga_sdrlib/flter/qa_flter.py | 1 | # Copyright (c) 2012 Ben Reynwar
# Released under MIT License (see LICENSE.txt)
import os
import random
import unittest
import logging
import shutil
from fpga_sdrlib.message import msg_utils
from fpga_sdrlib.conversions import f_to_int
from fpga_sdrlib.generate import logceil
from fpga_sdrlib import config, b100, buildutils
from fpga_sdrlib.testbench import TestBenchB100, TestBenchIcarusInner, TestBenchIcarusOuter
# Module-level logger, namespaced after this module's import path.
logger = logging.getLogger(__name__)
def convolve(data, taps):
    """Convolve *data* with *taps*, returning one output per input sample.

    The input is zero-padded on the left by ``len(taps) - 1`` samples, so the
    output has the same length as *data* and models a FIR filter starting
    from an empty delay line.
    """
    n_taps = len(taps)
    padded = [0] * (n_taps - 1) + data
    return [
        sum(padded[pos - k] * taps[k] for k in range(n_taps))
        for pos in range(n_taps - 1, len(padded))
    ]
def taps_to_start_msgs(taps, width, target):
    """Encode filter *taps* into a setup packet for the filter *target* id.

    Each tap is converted to an integer representation of *width* bits via
    ``f_to_int`` (presumably fixed-point -- see the conversions module), then
    all values are wrapped into a single message packet addressed to *target*.
    """
    contents = [f_to_int(tap, width, clean1=True) for tap in taps]
    packet = msg_utils.packet_from_content(contents, config.msg_length_width,
                                           config.msg_width, target)
    return packet
def prune_zeros(xs):
    """Strip leading and trailing zeros from *xs*.

    Interior zeros are kept.  Returns an empty list when every element is
    zero (or the sequence is empty).
    """
    nonzero_positions = [i for i, value in enumerate(xs) if value != 0]
    if not nonzero_positions:
        return []
    return xs[nonzero_positions[0]:nonzero_positions[-1] + 1]
class TestFilter(unittest.TestCase):

    def test_one(self):
        """
        Test the filter module: the simulated FIR filter must reproduce a
        software convolution of random taps with random complex input samples.
        """
        width = config.default_width
        sendnth = config.default_sendnth
        # Changing filter_length will require resynthesis.
        filter_length = 4
        # Random taps normalized so their absolute sum is 1.
        taps = [random.random()*2-1 for i in range(filter_length)]
        total = sum([abs(t) for t in taps])
        taps = [t/total for t in taps]
        # Arguments used for producing verilog from templates.
        extraargs = {'summult_length': filter_length,}
        # Amount of data to send.
        n_data = 10
        # Define the input: random complex samples followed by zeros to flush
        # the filter pipeline.
        in_samples = [random.random()*2-1 + random.random()*2j-1j for i in range(n_data)]
        in_samples += [0]*(filter_length-1)
        steps_rqd = len(in_samples)*sendnth + 100
        # Define meta data
        mwidth = 1
        in_ms = [random.randint(0, pow(2,mwidth)-1) for d in in_samples]
        expected = convolve(in_samples, taps)
        steps_rqd = n_data * sendnth * 2 + 1000
        filter_id = 123
        # Create, setup and simulate the test bench.
        defines = config.updated_defines(
            {'WIDTH': width,
             'FILTER_LENGTH': filter_length,
             'FILTER_ID': filter_id,
             })
        executable_inner = buildutils.generate_icarus_executable(
            'flter', 'filter_inner', '-test', defines=defines, extraargs=extraargs)
        executable_outer = buildutils.generate_icarus_executable(
            'flter', 'filter', '-test', defines=defines, extraargs=extraargs)
        fpgaimage = buildutils.generate_B100_image(
            'flter', 'filter', '-test', defines=defines,
            extraargs=extraargs)
        # BUGFIX: use floor division. Under Python 3 ``/`` yields a float while
        # f_to_int needs an integer bit-width; ``//`` is identical on Python 2.
        start_msgs = taps_to_start_msgs(taps, defines['WIDTH']//2, filter_id)
        tb_icarus_inner = TestBenchIcarusInner(executable_inner, in_samples, in_ms, start_msgs)
        tb_icarus_outer = TestBenchIcarusOuter(executable_outer, in_samples, start_msgs)
        tb_b100 = TestBenchB100(fpgaimage, in_samples, start_msgs)
        for tb, steps in (
                (tb_icarus_inner, steps_rqd),
                (tb_icarus_outer, steps_rqd),
                (tb_b100, 100000),
                ):
            tb.run(steps)
            # Confirm that our data is correct.
            self.assertEqual(len(tb.out_samples), len(expected))
            for r, e in zip(tb.out_samples, expected):
                self.assertAlmostEqual(e, r, 3)
class TestFilterBank(unittest.TestCase):

    def test_one(self):
        """
        Test the filterbank module: each of the interleaved filters must match
        a software convolution, accepting any rotation of the filter order.
        """
        width = config.default_width
        sendnth = config.default_sendnth
        # Changing filter_length will require resynthesis.
        n_filters = 3
        filter_length = 3
        # One normalized random tap set per filter; also keep them
        # concatenated for the setup message.
        all_taps = []
        combined_taps = []
        for n in range(n_filters):
            taps = [random.random()*2-1 for i in range(filter_length)]
            total = sum([abs(t) for t in taps])
            taps = [t/total for t in taps]
            all_taps.append(taps)
            combined_taps.extend(taps)
        # Arguments used for producing verilog from templates.
        extraargs = {'summult_length': filter_length,}
        # Amount of data to send.
        n_data = 30
        # Define the input: leading and trailing zeros flush the pipelines.
        in_samples = [0]*filter_length*n_filters*2
        in_samples += [random.random()*2-1 + random.random()*2j-1j for i in range(n_data)]
        in_samples += [0]*(filter_length-1)*n_filters
        steps_rqd = len(in_samples)*sendnth + 100
        # Define meta data
        mwidth = 1
        in_ms = [random.randint(0, pow(2,mwidth)-1) for d in in_samples]
        # The bank may start on any filter, so precompute the expected output
        # for every rotation of the tap sets.
        possible_expected = []
        for m in range(n_filters):
            shifted_taps = all_taps[m:] + all_taps[:m]
            expected_outputs = []
            for n in range(n_filters):
                filter_inputs = in_samples[n::n_filters]
                convolved = convolve(filter_inputs, shifted_taps[n])
                expected_outputs.append(convolved)
            expected = []
            for eo in zip(*expected_outputs):
                expected.extend(eo)
            possible_expected.append(expected)
        steps_rqd = n_data * sendnth * 2 + 1000
        # Create, setup and simulate the test bench.
        filter_id = 123
        defines = config.updated_defines(
            {'WIDTH': width,
             'FILTER_LENGTH': filter_length,
             'FILTERBANK_ID': filter_id,
             'N_FILTERS': n_filters,
             'FILTERBANK_MSG_BUFFER_LENGTH': 128,
             })
        executable_inner = buildutils.generate_icarus_executable(
            'flter', 'filterbank_inner', '-test', defines=defines, extraargs=extraargs)
        executable_outer = buildutils.generate_icarus_executable(
            'flter', 'filterbank', '-test', defines=defines, extraargs=extraargs)
        fpgaimage = buildutils.generate_B100_image(
            'flter', 'filterbank', '-test', defines=defines,
            extraargs=extraargs)
        # BUGFIX: use floor division. Under Python 3 ``/`` yields a float while
        # f_to_int needs an integer bit-width; ``//`` is identical on Python 2.
        start_msgs = taps_to_start_msgs(combined_taps, defines['WIDTH']//2, filter_id)
        tb_icarus_inner = TestBenchIcarusInner(executable_inner, in_samples, in_ms, start_msgs)
        tb_icarus_outer = TestBenchIcarusOuter(executable_outer, in_samples, start_msgs)
        tb_b100 = TestBenchB100(fpgaimage, in_samples, start_msgs)
        for tb, steps in (
                (tb_icarus_inner, steps_rqd),
                (tb_icarus_outer, steps_rqd),
                (tb_b100, 100000),
                ):
            tb.run(steps)
            # Confirm that our data is correct: accept a match against any
            # rotation of the expected filter outputs.
            received = prune_zeros(tb.out_samples)
            tol = 0.001
            matched_once = False
            for expected in possible_expected:
                expected = prune_zeros(expected)
                matches = True
                if (len(received) != len(expected)):
                    matches = False
                else:
                    for r, e in zip(received, expected):
                        if (abs(r-e) > tol):
                            matches = False
                            break
                if matches:
                    matched_once = True
            self.assertTrue(matched_once)
if __name__ == '__main__':
    # Verbose logging makes simulator failures easier to diagnose.
    config.setup_logging(logging.DEBUG)
    # Uncomment to run a single test case instead of the whole module:
    #suite = unittest.TestLoader().loadTestsFromTestCase(TestFilterBank)
    #unittest.TextTestRunner(verbosity=2).run(suite)
    unittest.main()
|
blaggacao/OpenUpgrade | refs/heads/8.0 | addons/crm/wizard/crm_phonecall_to_meeting.py | 381 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class crm_phonecall2meeting(osv.osv_memory):
    """Wizard that schedules a calendar meeting from a phonecall."""
    _name = 'crm.phonecall2meeting'
    _description = 'Phonecall To Meeting'

    def action_cancel(self, cr, uid, ids, context=None):
        """Close the "Phonecall to Meeting" wizard window.

        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of Phonecall to Meeting IDs
        @param context: A standard dictionary for contextual values
        @return: client action that closes the wizard window
        """
        return {'type': 'ir.actions.act_window_close'}

    def action_make_meeting(self, cr, uid, ids, context=None):
        """Open the calendar view pre-filled from the active phonecall.

        @return: action dictionary for the created meeting view, or an
            empty dict when no phonecall is active in the context
        """
        res = {}
        phonecall_id = context.get('active_id', False) if context else False
        if phonecall_id:
            phonecall_pool = self.pool.get('crm.phonecall')
            phonecall = phonecall_pool.browse(cr, uid, phonecall_id, context)
            action_pool = self.pool.get('ir.actions.act_window')
            res = action_pool.for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
            partner = phonecall.partner_id
            res['context'] = {
                'default_phonecall_id': phonecall.id,
                'default_partner_id': (partner.id or False) if partner else False,
                'default_user_id': uid,
                'default_email_from': phonecall.email_from,
                'default_state': 'open',
                'default_name': phonecall.name,
            }
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
t2abdulg/SALib | refs/heads/master | SALib/tests/test_morris.py | 2 | from __future__ import division
from nose.tools import with_setup, raises
from numpy.testing import assert_equal
import numpy as np
from ..sample.morris import sample, compute_optimised_trajectories
from ..util import read_param_file
def teardown():
    # No-op: nose's with_setup() requires a teardown callable even though
    # these tests leave nothing that needs cleaning up.
    pass
def setup_param_file():
    """Write a three-parameter test file: name, lower bound, upper bound."""
    filename = "SALib/tests/test_param_file.txt"
    rows = ["Test 1,0,1.0\n",
            "Test 2,0,1.0\n",
            "Test 3,0,1.0\n"]
    with open(filename, "w") as ofile:
        ofile.writelines(rows)
def setup_param_file_with_groups():
    """Write a parameter file whose fourth column assigns rows to groups."""
    filename = "SALib/tests/test_param_file_w_groups.txt"
    rows = ["Test 1,0,1.0,Group 1\n",
            "Test 2,0,1.0,Group 1\n",
            "Test 3,0,1.0,Group 2\n"]
    with open(filename, "w") as ofile:
        ofile.writelines(rows)
def setup_param_file_with_groups_prime():
    """Write a seven-parameter file spread unevenly over three groups."""
    filename = "SALib/tests/test_param_file_w_groups_prime.txt"
    group_of_row = [1, 2, 2, 3, 3, 3, 3]
    with open(filename, "w") as ofile:
        ofile.writelines(
            "Test {0},0,1.0,Group {1}\n".format(index + 1, group)
            for index, group in enumerate(group_of_row))
def setup():
    """Create every parameter file the tests below rely on."""
    for build_file in (setup_param_file,
                       setup_param_file_with_groups,
                       setup_param_file_with_groups_prime):
        build_file()
@with_setup(setup_param_file_with_groups)
def test_group_in_param_file_read():
    '''
    Tests that groups in a parameter file are read correctly
    '''
    parameter_file = "SALib/tests/test_param_file_w_groups.txt"
    problem = read_param_file(parameter_file)
    groups, group_names = problem['groups']

    assert_equal(problem['names'], ["Test 1", "Test 2", "Test 3"])
    # Use the builtin ``int``: ``np.int`` was only an alias for it and is
    # removed in NumPy >= 1.24, which would make this test error on import.
    assert_equal(groups, np.matrix('1,0;1,0;0,1', dtype=int))
    assert_equal(group_names, ['Group 1', 'Group 2'])
@raises(ValueError)
@with_setup(setup, teardown)
def test_grid_jump_lt_num_levels():
    # A grid jump equal to the number of levels is invalid and must raise.
    problem = read_param_file("SALib/tests/test_param_file.txt")
    samples, num_levels, grid_jump = 10, 4, 4
    sample(problem, samples, num_levels, grid_jump,
           optimal_trajectories=samples)
@raises(ValueError)
@with_setup(setup, teardown)
def test_optimal_trajectories_lt_samples():
    # Requesting as many optimised trajectories as samples must raise.
    problem = read_param_file("SALib/tests/test_param_file.txt")
    samples, num_levels, grid_jump = 10, 4, 2
    sample(problem, samples, num_levels, grid_jump,
           optimal_trajectories=samples)
@raises(ValueError)
@with_setup(setup, teardown)
def test_optimal_trajectories_lt_10():
    # More than ten optimised trajectories must be rejected.
    problem = read_param_file("SALib/tests/test_param_file.txt")
    samples, num_levels, grid_jump = 10, 4, 2
    sample(problem, samples, num_levels, grid_jump,
           optimal_trajectories=11)
@raises(ValueError)
@with_setup(setup, teardown)
def test_optimal_trajectories_gte_one():
    # A single optimised trajectory is too few and must be rejected.
    problem = read_param_file("SALib/tests/test_param_file.txt")
    samples, num_levels, grid_jump = 10, 4, 2
    sample(problem, samples, num_levels, grid_jump,
           optimal_trajectories=1)
def test_find_optimum_trajectories():
    # Six candidate trajectories over two inputs; the optimiser should keep
    # the four that best span the input space (candidates 1, 3, 4 and 6).
    candidates = [
        [[0, 1 / 3.], [0, 1.], [2 / 3., 1.]],
        [[0, 1 / 3.], [2 / 3., 1 / 3.], [2 / 3., 1.]],
        [[2 / 3., 0], [2 / 3., 2 / 3.], [0, 2 / 3.]],
        [[1 / 3., 1.], [1., 1.], [1, 1 / 3.]],
        [[1 / 3., 1.], [1 / 3., 1 / 3.], [1, 1 / 3.]],
        [[1 / 3., 2 / 3.], [1 / 3., 0], [1., 0]],
    ]
    input_sample = np.concatenate(candidates)
    problem = {'num_vars': 2, 'groups': None}

    output = compute_optimised_trajectories(problem, input_sample, 6, 4)

    expected = np.concatenate(
        [candidates[0], candidates[2], candidates[3], candidates[5]])
    np.testing.assert_equal(output, expected)
@raises(ValueError)
def test_catch_inputs_not_in_zero_one_range():
    # Scaling the sample by ten pushes every point outside [0, 1], which
    # the optimiser must reject with a ValueError.
    candidates = [
        [[0, 1 / 3.], [0, 1.], [2 / 3., 1.]],
        [[0, 1 / 3.], [2 / 3., 1 / 3.], [2 / 3., 1.]],
        [[2 / 3., 0], [2 / 3., 2 / 3.], [0, 2 / 3.]],
        [[1 / 3., 1.], [1., 1.], [1, 1 / 3.]],
        [[1 / 3., 1.], [1 / 3., 1 / 3.], [1, 1 / 3.]],
        [[1 / 3., 2 / 3.], [1 / 3., 0], [1., 0]],
    ]
    input_sample = np.concatenate(candidates) * 10
    problem = {'num_vars': 2, 'groups': None}
    compute_optimised_trajectories(problem, input_sample, 10, 4)
@raises(ValueError)
def test_group_sample_fails_with_no_G_matrix():
    # A 'groups' tuple that carries no actual group matrix must raise.
    problem = {'bounds': [[0., 1.], [0., 1.], [0., 1.], [0., 1.]],
               'num_vars': 4,
               'groups': (None, None)}
    sample(problem, 6, 4, 2)
@raises(TypeError)
def test_group_sample_fails_with_wrong_G_matrix():
    N = 6
    num_levels = 4
    grid_jump = 2

    # NOTE(review): ``list[1, 2, 3, 4]`` looks like a typo for the literal
    # ``[1, 2, 3, 4]``.  On Python 2, subscripting the ``list`` type raises
    # TypeError *while building this dict*, so the test passes without ever
    # exercising sample()'s own type check -- confirm intent before changing.
    problem = {'bounds': [[0., 1.], [0., 1.], [0., 1.], [0., 1.]],
               'num_vars': 4,
               'groups': (list[1, 2, 3, 4], None)}

    sample(problem, N, num_levels, grid_jump)
|
zzzirk/boto | refs/heads/master | tests/unit/ec2/test_spotinstance.py | 114 | from tests.unit import AWSMockServiceTestCase
from boto.ec2.connection import EC2Connection
class TestCancelSpotInstanceRequests(AWSMockServiceTestCase):
    """Exercises EC2Connection.cancel_spot_instance_requests against a
    canned CancelSpotInstanceRequests response."""

    connection_class = EC2Connection

    def default_body(self):
        # Canned XML body served by the mocked EC2 endpoint.
        return b"""
            <CancelSpotInstanceRequestsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
                <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
                <spotInstanceRequestSet>
                    <item>
                        <spotInstanceRequestId>sir-1a2b3c4d</spotInstanceRequestId>
                        <state>cancelled</state>
                    </item>
                    <item>
                        <spotInstanceRequestId>sir-9f8e7d6c</spotInstanceRequestId>
                        <state>cancelled</state>
                    </item>
                </spotInstanceRequestSet>
            </CancelSpotInstanceRequestsResponse>
        """

    def test_cancel_spot_instance_requests(self):
        self.set_http_response(status_code=200)
        request_ids = ['sir-1a2b3c4d', 'sir-9f8e7d6c']
        cancelled = self.service_connection.cancel_spot_instance_requests(request_ids)

        # The request must serialize both ids as indexed parameters.
        self.assert_request_parameters({
            'Action': 'CancelSpotInstanceRequests',
            'SpotInstanceRequestId.1': 'sir-1a2b3c4d',
            'SpotInstanceRequestId.2': 'sir-9f8e7d6c'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])

        # Both requests come back in order, each marked cancelled.
        self.assertEqual(len(cancelled), 2)
        for request, expected_id in zip(cancelled, request_ids):
            self.assertEqual(request.id, expected_id)
            self.assertEqual(request.state, 'cancelled')
class TestGetSpotPriceHistory(AWSMockServiceTestCase):
    """Tests EC2Connection.get_spot_price_history against a canned
    DescribeSpotPriceHistory response."""
    connection_class = EC2Connection

    def default_body(self):
        # Canned XML body served by the mocked EC2 endpoint: two history
        # items plus a nextToken used for pagination.
        return b"""
            <DescribeSpotPriceHistoryResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
                <requestId>b6c6978c-bd13-4ad7-9bc8-6f0ac9d32bcc</requestId>
                <spotPriceHistorySet>
                    <item>
                        <instanceType>c3.large</instanceType>
                        <productDescription>Linux/UNIX</productDescription>
                        <spotPrice>0.032000</spotPrice>
                        <timestamp>2013-12-28T12:17:43.000Z</timestamp>
                        <availabilityZone>us-west-2c</availabilityZone>
                    </item>
                    <item>
                        <instanceType>c3.large</instanceType>
                        <productDescription>Windows (Amazon VPC)</productDescription>
                        <spotPrice>0.104000</spotPrice>
                        <timestamp>2013-12-28T07:49:40.000Z</timestamp>
                        <availabilityZone>us-west-2b</availabilityZone>
                    </item>
                </spotPriceHistorySet>
                <nextToken>q5GwEl5bMGjKq6YmhpDLJ7hEwyWU54jJC2GQ93n61vZV4s1+fzZ674xzvUlTihrl</nextToken>
            </DescribeSpotPriceHistoryResponse>
        """

    def test_get_spot_price_history(self):
        """Covers three call styles: instance_type kwarg, filters dict,
        and next_token pagination."""
        self.set_http_response(status_code=200)

        # 1) instance_type becomes the InstanceType request parameter.
        response = self.service_connection.get_spot_price_history(
            instance_type='c3.large')
        self.assert_request_parameters({
            'Action': 'DescribeSpotPriceHistory',
            'InstanceType': 'c3.large'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEqual(len(response), 2)
        # The token is exposed both as next_token and the legacy nextToken.
        self.assertEqual(response.next_token,
                         'q5GwEl5bMGjKq6YmhpDLJ7hEwyWU54jJC2GQ93n61vZV4s1+fzZ674xzvUlTihrl')
        self.assertEqual(response.nextToken,
                         'q5GwEl5bMGjKq6YmhpDLJ7hEwyWU54jJC2GQ93n61vZV4s1+fzZ674xzvUlTihrl')
        self.assertEqual(response[0].instance_type, 'c3.large')
        self.assertEqual(response[0].availability_zone, 'us-west-2c')
        self.assertEqual(response[1].instance_type, 'c3.large')
        self.assertEqual(response[1].availability_zone, 'us-west-2b')

        # 2) A filters dict serializes as Filter.N.Name / Filter.N.Value.N.
        response = self.service_connection.get_spot_price_history(
            filters={'instance-type': 'c3.large'})
        self.assert_request_parameters({
            'Action': 'DescribeSpotPriceHistory',
            'Filter.1.Name': 'instance-type',
            'Filter.1.Value.1': 'c3.large'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])

        # 3) next_token is passed through as the NextToken parameter.
        response = self.service_connection.get_spot_price_history(
            next_token='foobar')
        self.assert_request_parameters({
            'Action': 'DescribeSpotPriceHistory',
            'NextToken': 'foobar'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
|
le717/Shutdown-Timer | refs/heads/master | ShutdownTimer.py | 1 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""Shutdown Timer - Small Windows shutdown timer.
Created 2013, 2015 Triangle717
<http://Triangle717.WordPress.com>
Shutdown Timer is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Shutdown Timer is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Shutdown Timer. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import json
import time
import argparse
import subprocess
import constants as const
__all__ = ("ShutdownTimer", "main")
class ShutdownTimer:

    """Core Shutdown Timer code and actions.

    Exposes the following public properties and methods:
    * {Tuple} verbs Two index tuple containing the action verbs.
                    Second index is the "ing" form of the verb.
    * {Method} saveConfig Persist the current options to the JSON config.
    * {Method} getTime Return the configured closing time as "HH:MM".
    * {Method} setTime Validate and store the closing time.
    * {Method} start Count down, then shutdown/restart the computer.
    * {Method} setModes Set the force and restart options.
    * {Method} getModes Return the force and restart options.
    """

    def __init__(self):
        """Initalize all properties and methods."""
        self.__time = None          # (hour, minute) tuple once set
        self.__force = False        # close programs without waiting (/f)
        self.__restart = False      # restart instead of shutdown
        self.__configData = None    # parsed JSON configuration, if any
        self.__configPath = self.__getConfigPath()
        self.__jsonFile = os.path.join(self.__configPath,
                                       "Shutdown-Timer.json")
        self.__loadConfig()
        self.__commandLine()
        self.verbs = self.__getVerb()

    def __getConfigPath(self):
        """Get the file path where configuration files will be stored.

        @returns {String} The configuration path,
            `%AppData%/Triangle717/*AppName*`.
        """
        root = os.path.expandvars("%AppData%")

        # Create the path if needed
        path = os.path.join(root, "Triangle717", "Shutdown-Timer")
        if not os.path.exists(path):
            os.makedirs(path)
        return path

    def __getVerb(self):
        """Set the action verbs for use in messages depending on restart status.

        @return {Tuple} Two index tuple containing the action verbs.
            Second index is the "ing" form of the verb.
        """
        if self.__restart:
            return ("restart", "restarting")
        return ("shutdown", "shutting down")

    def __getCommand(self):
        """Construct the shutdown command based on user option selection.

        @returns {Array} The exact command to run, including any arguments.
        """
        commands = ["shutdown.exe"]

        # Restart or shutdown computer?
        if self.__restart:
            commands.append("/r")
        else:
            commands.append("/p")

        # Force closing, do not wait for any programs
        if self.__force:
            commands.append("/f")

        # Restarting will always have a timeout dialog before
        # the process starts, remove it to match shutdown behavior
        if self.__restart:
            commands.append("/t")
            commands.append("0")
        return commands

    def __runCommand(self):
        """Run the closing command."""
        subprocess.call(self.__getCommand())

    def __commandLine(self):
        """Command-line arguments parser.

        @returns {Boolean} Always returns True.
        """
        parser = argparse.ArgumentParser(
            description="{0} Command-line arguments".format(const.appName))

        # Force mode
        parser.add_argument("-f", "--force",
                            help="Close Windows without waiting on programs",
                            action="store_true")

        # Restart mode
        parser.add_argument("-r", "--restart",
                            help="Restart Windows instead of shutting down",
                            action="store_true")

        # Assign the arguments
        args = parser.parse_args()
        self.__force = args.force
        self.__restart = args.restart
        return True

    def __loadConfig(self):
        """Read and store the configuration file.

        @returns {Boolean} True if the config file was read, False otherwise.
        """
        try:
            # Make sure it exists
            if os.path.exists(self.__jsonFile):
                with open(self.__jsonFile, "rt", encoding="utf-8") as f:
                    self.__configData = json.load(f)
                return True
            return False

        # The file is not valid JSON, sliently fail
        except ValueError:
            return False

    def saveConfig(self):
        """Write the JSON-based config file.

        @returns {Boolean} True if the config file was written,
            False otherwise.
        """
        try:
            jsonData = {
                "force": self.__force,
                "restart": self.__restart,
                "time": self.__time
            }

            with open(self.__jsonFile, "wt", encoding="utf_8") as f:
                f.write(json.dumps(jsonData, indent=4, sort_keys=True))
            return True

        # Silently fail
        except PermissionError:
            return False

    def __isBetween(self, val, minV, maxV):
        """Check that a value is within inclusive acceptable range.

        @param {Number} val The value to check.
        @param {Number} minV The minimum value.
        @param {Number} maxV The maximum value.
        @return {Boolean} True if in range, False if not.
        """
        return val >= minV and val <= maxV

    def __getCurTime(self):
        """Get the current time, according to the system clock.

        @return {Tuple} Three index tuple of (hour, minute, second).
        """
        curTime = time.localtime()
        return (curTime[3], curTime[4], curTime[5])

    def __calcHoursLeft(self, curHour, offHour):
        """Calculate the number of hours that remain until closing.

        @param {Number} curHour The current hour (0-23).
        @param {Number} offHour The hour the computer should close (0-23).
        @return {Number} The number of hours remaining.
        """
        # It will happpen this very hour
        if curHour == offHour:
            return 0

        # 4-23 hours over night
        elif curHour > offHour:
            # Midnight through noon
            if self.__isBetween(offHour, 0, 12):
                return (24 + offHour) - curHour

            # 1 PM through 11 PM
            elif self.__isBetween(offHour, 13, 23):
                return 24 + (offHour - curHour)

        # 1-18 hours today
        elif offHour > curHour:
            return (offHour - curHour) - 1

    def __countDown(self):
        """Calculate remaining time and wait until closing can occur."""
        curHour, curMin, curSec = self.__getCurTime()

        # If the shutdown time does not equal, the current time,
        # as defined by the local system's clock
        while (
            "{0}:{1}".format(curHour, curMin) !=
            "{0}:{1}".format(self.__time[0], self.__time[1])
        ):
            curHour, curMin, curSec = self.__getCurTime()

            # Calculate remaining hours
            remainHours = self.__calcHoursLeft(curHour, self.__time[0])

            # Calculate remaining minutes
            if curMin > self.__time[1]:
                remainMins = curMin - (self.__time[1] - 1)
            else:
                remainMins = (self.__time[1] - 1) - curMin

            # Prevent the minutes from reading -1
            if remainMins == -1:
                remainMins = 0

            # Calculate remaining seconds
            remainSecs = 60 - curSec

            # Prevent the seconds from reading 60
            if remainSecs == 60:
                remainSecs = "00"

            # Add the leading zeros
            elif self.__isBetween(remainSecs, 1, 9):
                remainSecs = "0{0}".format(remainSecs)

            # Display remaining time
            remainTime = "{0}:{1}".format(remainMins, remainSecs)

            # Display hours if needed too
            if remainHours != 0:
                remainTime = "{0}:{1}".format(remainHours, remainTime)

            print("Time remaining until {0}: {1}".format(
                self.verbs[0], remainTime))
            time.sleep(1)

        # Close the computer
        print("\nYour computer will now {0}.".format(self.verbs[0]))
        return True

    def getTime(self):
        """Get the time the computer will close.

        @return {String} Closing time.
        """
        time = []

        # Hours
        if self.__isBetween(self.__time[0], 0, 9):
            time.append("0{0}".format(self.__time[0]))
        else:
            time.append(str(self.__time[0]))

        # Add the colon
        time.append(":")

        # Minutes
        if self.__isBetween(self.__time[1], 0, 9):
            time.append("0{0}".format(self.__time[1]))
        else:
            time.append(str(self.__time[1]))
        return "".join(time)

    def setTime(self, userTime):
        """Validate and set the time the computer will close.

        @param {String} userTime The user-provided time to close.
        @return {!Boolean} True if the time was set,
            False if defined time format was not followed,
            A ValueError will be raised if a value
            is not in acceptable range.
        """
        # Make sure it follows a certain format
        formatRegex = re.match(r"(\d{2}):(\d{2})", userTime)
        if formatRegex is None:
            print("The time is not in the required HH:MM format!")
            return False

        # Convert the values to intergers
        hours = int(formatRegex.group(1))
        mins = int(formatRegex.group(2))

        # Hours value is out of range.  The system clock reports hours
        # 0-23, so accepting 24 would make the countdown loop forever.
        if not self.__isBetween(hours, 0, 23):
            raise ValueError("Hour values must be between 0 and 23.")

        # Minutes value is out of range
        if not self.__isBetween(mins, 0, 59):
            raise ValueError("Minute values must be between 0 and 59.")

        # Store the time
        self.__time = (hours, mins)
        return True

    def start(self):
        """Start the timer and send command to close the computer."""
        print()
        if self.__countDown():
            self.__runCommand()

    def setModes(self, force=False, restart=False):
        """Set the Windows closing options.

        @param {Boolean} force Close Windows without waiting on programs.
        @param {Boolean} restart Restart instead of shutting down.
        @returns {Boolean} Always returns True.
        """
        self.__force = force
        self.__restart = restart
        return True

    def getModes(self):
        """Get the Windows closing options.

        @return {Tuple} Two index tuple containing Boolean values for
            force and restart modes. In each case, a value of True
            represents that mode is enabled and False disabled.
        """
        return (self.__force, self.__restart)
def main():
    """Basic temporary UI until TODO GUI is implemented."""
    # Show the app name and version in the console title bar.
    os.system("title {0} v{1}".format(const.appName, const.version))
    timer = ShutdownTimer()

    prompt = """
Enter the time you want the computer to {0}.
Use the 24-hour system in the following format: "HH:MM".
Submit a "q" to exit.""".format(timer.verbs[0])
    print(prompt)

    user_reply = input("\n\n> ").lower().strip()
    if user_reply == "q":
        # Quit program
        raise SystemExit(0)

    # Arm the timer only when the supplied time validated.
    if timer.setTime(user_reply):
        timer.saveConfig()
        timer.start()
# Launch the interactive console UI when executed as a script.
if __name__ == "__main__":
    main()
|
spadae22/odoo | refs/heads/chris_master_8 | addons/website/tests/test_ui.py | 342 | import openerp.tests
class TestUi(openerp.tests.HttpCase):
    """Smoke-tests the website UI by driving PhantomJS over HTTP."""

    def test_01_public_homepage(self):
        # Anonymous visit: the snippet machinery must load on "/".
        url, code, ready = "/", "console.log('ok')", "openerp.website.snippet"
        self.phantom_js(url, code, ready)

    def test_03_admin_homepage(self):
        # Logged-in admin: the inline editor must be available.
        url, code, ready = "/", "console.log('ok')", "openerp.website.editor"
        self.phantom_js(url, code, ready, login='admin')

    def test_04_admin_tour_banner(self):
        # Replay the recorded "banner" tour as admin.
        self.phantom_js("/", "openerp.Tour.run('banner', 'test')",
                        "openerp.Tour.tours.banner", login='admin')
# vim:et:
|
loopCM/chromium | refs/heads/trunk | chrome/test/install_test/chrome.py | 36 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extended WebDriver interface that uses helper extension.
This file is makeshift and should eventually be switched over to
using the new ChromeDriver python interface. However, as that is
not quite ready, this class simply installs a helper extension
and executes scripts in the background page to access extension
APIs.
This may end up being merged with chrome/test/ext_auto, if they
accomplish similar enough purposes. For now, integration with that
is a bit premature.
"""
import os
from selenium import webdriver
# Name of the custom ChromeDriver command used to fetch extension view handles.
_CHROME_GET_VIEW_HANDLES = 'chrome.getViewHandles'
# Absolute path to the helper extension bundled next to this file.
_EXTENSION = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'ext_auto')
class Chrome(webdriver.Remote):
  """Extended WebDriver interface that uses helper extension."""

  def __init__(self, url, desired_capabilities, options=None):
    """Initializes Chrome object.

    If both desired_capabilities and options have the same settings, the
    settings from options will be used.

    Args:
      url: The URL of the ChromeDriver Service.
      desired_capabilities: Chrome capabilities dictionary.
      options: chrome_options.ChromeOptions object. Settings in options will
          overwrite settings in desired_capabilities.

    Raises:
      RuntimeError: Unable to find helper extension.
    """
    if options is not None:
      desired_capabilities.update(options.GetCapabilities())
    # Build a new list with '+' instead of '+=' so that a caller-supplied
    # 'chrome.switches' list is not mutated in place.
    switches = desired_capabilities.get('chrome.switches', [])
    switches = switches + ['--load-extension=' + _EXTENSION]
    desired_capabilities['chrome.switches'] = switches
    super(Chrome, self).__init__(url, desired_capabilities)

    # Register the custom command used to enumerate extension views.
    custom_commands = {
        _CHROME_GET_VIEW_HANDLES:
            ('GET', '/session/$sessionId/chrome/views'),
    }
    self.command_executor._commands.update(custom_commands)
    views = self.execute(_CHROME_GET_VIEW_HANDLES)['value']
    self.set_script_timeout(30)  # TODO(kkania): Make this configurable.
    # Locate the background page of the bundled helper extension by its id.
    for view in views:
      if view.get('extension_id') == 'aapnijgdinlhnhlmodcfapnahmbfebeb':
        self._extension = view['handle']
        break
    else:
      raise RuntimeError('Unable to find helper extension')

  def _execute_extension_command(self, name, params=None):
    """Executes an extension command.

    When Chrome is started, a helper extension is loaded which provides
    a simple synchronous API for manipulating Chrome via the extension
    APIs. Communication with the extension is accomplished by executing
    a script in the background page of the extension which calls the
    'executeCommand' function with the name of the command, a parameter
    dictionary, and a callback function that can be used to signal
    when the command is finished and potentially send a return value.

    Args:
      name: Name of the extension command to run.
      params: Optional dictionary of command parameters.
    """
    # Use None as the default: a mutable default dict ({}) would be shared
    # across all calls and could leak state between commands.
    if params is None:
      params = {}
    old_window = self.current_window_handle
    self.switch_to_window(self._extension)
    self.execute_async_script(
        'executeCommand.apply(null, arguments)', name, params)
    self.switch_to_window(old_window)

  def create_tab(self, url=None):
    """Creates a new tab with the given URL and switches to it.

    If no URL is provided, the homepage will be used.
    """
    params = {}
    if url is not None:
      params['url'] = url
    self._execute_extension_command('createTab', params)
    self.switch_to_window(self.window_handles[-1])

  def create_blank_tab(self):
    """Creates a new blank tab and switches to it."""
    self.create_tab('about:blank')
|
dekadev/Deka-kernel-CM10.1-3.4 | refs/heads/master | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
	"""Top-level window that draws scheduler traces as scrollable,
	zoomable rectangle rows (one row per task/CPU rectangle reported by
	the sched_tracer).

	NOTE(review): this module is Python 2 code; the `/` operations below
	rely on integer (floor) division for pixel math.
	"""
	Y_OFFSET = 100              # top margin, in pixels, above the first row
	RECT_HEIGHT = 100           # height of one trace row
	RECT_SPACE = 50             # vertical gap between rows
	EVENT_MARKING_WIDTH = 5     # height of the event-marker strip on a rect

	def __init__(self, sched_tracer, title, parent = None, id = -1):
		wx.Frame.__init__(self, parent, id, title)

		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
		self.screen_width -= 10
		self.screen_height -= 10
		self.zoom = 0.5
		self.scroll_scale = 20
		self.sched_tracer = sched_tracer
		self.sched_tracer.set_root_win(self)
		(self.ts_start, self.ts_end) = sched_tracer.interval()
		self.update_width_virtual()
		self.nr_rects = sched_tracer.nr_rectangles() + 1
		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

		# whole window panel
		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

		# scrollable container
		self.scroll = wx.ScrolledWindow(self.panel)
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
		self.scroll.EnableScrolling(True, True)
		self.scroll.SetFocus()

		# scrollable drawing area
		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

		self.scroll.Fit()
		self.Fit()

		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

		self.txt = None

		self.Show(True)

	def us_to_px(self, val):
		# Microseconds -> pixels at the current zoom level.
		return val / (10 ** 3) * self.zoom

	def px_to_us(self, val):
		# Pixels -> microseconds at the current zoom level.
		return (val / self.zoom) * (10 ** 3)

	def scroll_start(self):
		# Current scroll position in pixels.
		(x, y) = self.scroll.GetViewStart()
		return (x * self.scroll_scale, y * self.scroll_scale)

	def scroll_start_us(self):
		# Current horizontal scroll position in microseconds.
		(x, y) = self.scroll_start()
		return self.px_to_us(x)

	def paint_rectangle_zone(self, nr, color, top_color, start, end):
		# Draw one rectangle in row `nr` spanning [start, end] (timestamps);
		# an optional top_color strip marks an event on the rectangle.
		offset_px = self.us_to_px(start - self.ts_start)
		width_px = self.us_to_px(end - self.ts_start)

		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		width_py = RootFrame.RECT_HEIGHT

		dc = self.dc

		if top_color is not None:
			(r, g, b) = top_color
			top_color = wx.Colour(r, g, b)
			brush = wx.Brush(top_color, wx.SOLID)
			dc.SetBrush(brush)
			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
			width_py -= RootFrame.EVENT_MARKING_WIDTH
			offset_py += RootFrame.EVENT_MARKING_WIDTH

		(r ,g, b) = color
		color = wx.Colour(r, g, b)
		brush = wx.Brush(color, wx.SOLID)
		dc.SetBrush(brush)
		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

	def update_rectangles(self, dc, start, end):
		# Ask the tracer to repaint the visible [start, end] window
		# (offsets are relative to the trace start).
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)

	def on_paint(self, event):
		# Repaint only the currently visible time window.
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc

		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)

	def rect_from_ypixel(self, y):
		# Map a y pixel coordinate to a row index; -1 when the click
		# falls outside any row (margin or inter-row gap).
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1

		return rect

	def update_summary(self, txt):
		# Replace the summary text shown below the trace area.
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))


	def on_mouse_down(self, event):
		# Forward a click to the tracer as (row, timestamp).
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return

		t = self.px_to_us(x) + self.ts_start

		self.sched_tracer.mouse_down(rect, t)


	def update_width_virtual(self):
		# Total virtual width in pixels for the whole trace interval.
		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

	def __zoom(self, x):
		# Re-derive the virtual width and keep timestamp `x` at the
		# left edge of the view after the zoom factor changed.
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()

	def zoom_in(self):
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)

	def zoom_out(self):
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)

	def on_key_press(self, event):
		# '+'/'-' zoom; arrow keys scroll one scroll unit at a time.
		key = event.GetRawKeyCode()
		if key == ord("+"):
			self.zoom_in()
			return
		if key == ord("-"):
			self.zoom_out()
			return

		key = event.GetKeyCode()
		(x, y) = self.scroll.GetViewStart()
		if key == wx.WXK_RIGHT:
			self.scroll.Scroll(x + 1, y)
		elif key == wx.WXK_LEFT:
			self.scroll.Scroll(x - 1, y)
		elif key == wx.WXK_DOWN:
			self.scroll.Scroll(x, y + 1)
		elif key == wx.WXK_UP:
			self.scroll.Scroll(x, y - 1)
|
sbalde/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/open_ended_grading_classes/grading_service_module.py | 106 | # This class gives a common interface for logging into the grading controller
import logging
import requests
import dogstats_wrapper as dog_stats_api
from lxml import etree
from requests.exceptions import RequestException, ConnectionError, HTTPError
from .combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
log = logging.getLogger(__name__)
class GradingServiceError(Exception):
    """Raised when the Open Response Assessment grading backend cannot be
    reached or returns an unusable response."""
class GradingService(object):
    """
    Interface to staff grading backend.

    Wraps an authenticated ``requests`` session against the external Open
    Response Assessment (ORA) grading controller and provides GET/POST
    helpers that transparently re-login when the session has expired.

    NOTE(review): ``self.login_url`` and ``self.METRIC_NAME`` are read by
    methods below but never set in this class -- presumably subclasses
    provide them; confirm against the subclasses.
    """
    def __init__(self, config):
        # Credentials used to authenticate against the grading controller.
        self.username = config['username']
        self.password = config['password']
        # A single shared session so the login cookie persists across calls.
        self.session = requests.Session()
        # Callable used by _render_rubric to turn rubric data into HTML.
        self.render_template = config['render_template']

    def _login(self):
        """
        Log into the staff grading service.

        Raises requests.exceptions.HTTPError if something goes wrong.

        Returns the decoded json dict of the response.
        """
        response = self.session.post(self.login_url,
                                     {'username': self.username,
                                      'password': self.password, })
        response.raise_for_status()
        return response.json()

    def _metric_name(self, suffix):
        """
        Return a metric name for datadog, using `self.METRIC_NAME` as
        a prefix, and `suffix` as the suffix.

        Arguments:
            suffix (str): The metric suffix to use.
        """
        return '{}.{}'.format(self.METRIC_NAME, suffix)

    def _record_result(self, action, data, tags=None):
        """
        Log results from an API call to an ORA service to datadog.

        Arguments:
            action (str): The ORA action being recorded.
            data (dict): The data returned from the ORA service. Should contain the key 'success'.
            tags (list): A list of tags to attach to the logged metric.
        """
        if tags is None:
            tags = []
        tags.append(u'result:{}'.format(data.get('success', False)))
        tags.append(u'action:{}'.format(action))
        dog_stats_api.increment(self._metric_name('request.count'), tags=tags)

    def post(self, url, data, allow_redirects=False):
        """
        Make a post request to the grading controller. Returns the parsed json results of that request.

        Raises GradingServiceError when the controller cannot be reached or
        returns a response that is not valid JSON.
        """
        try:
            op = lambda: self.session.post(url, data=data,
                                           allow_redirects=allow_redirects)
            response_json = self._try_with_login(op)
        except (RequestException, ConnectionError, HTTPError, ValueError) as err:
            # reraise as promised GradingServiceError, but preserve stacktrace.
            # NOTE(review): despite the comment above, the original traceback
            # is actually discarded by this plain `raise`; only the log entry
            # records the failure site.
            #This is a dev_facing_error
            error_string = "Problem posting data to the grading controller. URL: {0}, data: {1}".format(url, data)
            log.error(error_string)
            raise GradingServiceError(error_string)
        return response_json

    def get(self, url, params, allow_redirects=False):
        """
        Make a get request to the grading controller. Returns the parsed json results of that request.

        Raises GradingServiceError when the controller cannot be reached or
        returns a response that is not valid JSON.
        """
        op = lambda: self.session.get(url,
                                      allow_redirects=allow_redirects,
                                      params=params)
        try:
            response_json = self._try_with_login(op)
        except (RequestException, ConnectionError, HTTPError, ValueError) as err:
            # reraise as promised GradingServiceError, but preserve stacktrace.
            # NOTE(review): as in post(), the original traceback is lost here.
            #This is a dev_facing_error
            error_string = "Problem getting data from the grading controller. URL: {0}, params: {1}".format(url, params)
            log.error(error_string)
            raise GradingServiceError(error_string)
        return response_json

    def _try_with_login(self, operation):
        """
        Call operation(), which should return a requests response object. If
        the request fails with a 'login_required' error, call _login() and try
        the operation again.

        Returns the result of operation(). Does not catch exceptions.
        """
        response = operation()
        resp_json = response.json()
        if (resp_json
                and resp_json.get('success') is False
                and resp_json.get('error') == 'login_required'):
            # apparently we aren't logged in. Try to fix that.
            r = self._login()
            if r and not r.get('success'):
                # Login failed; we still retry the operation below and let the
                # caller see whatever the second attempt produces.
                log.warning("Couldn't log into ORA backend. Response: %s",
                            r)
            # try again
            response = operation()
            response.raise_for_status()
            resp_json = response.json()
        return resp_json

    def _render_rubric(self, response, view_only=False):
        """
        Given an HTTP Response json with the key 'rubric', render out the html
        required to display the rubric and put it back into the response

        returns the updated response as a dictionary that can be serialized later
        """
        try:
            if 'rubric' in response:
                rubric = response['rubric']
                rubric_renderer = CombinedOpenEndedRubric(self.render_template, view_only)
                rubric_dict = rubric_renderer.render_rubric(rubric)
                # NOTE(review): `success` is assigned but never used here.
                success = rubric_dict['success']
                rubric_html = rubric_dict['html']
                response['rubric'] = rubric_html
            return response
        # if we can't parse the rubric into HTML,
        except (etree.XMLSyntaxError, RubricParsingError):
            #This is a dev_facing_error
            log.exception("Cannot parse rubric string. Raw string: {0}".format(response['rubric']))
            return {'success': False,
                    'error': 'Error displaying submission'}
        except ValueError:
            #This is a dev_facing_error
            log.exception("Error parsing response: {0}".format(response))
            return {'success': False,
                    'error': "Error displaying submission"}
|
raghavs1108/DataPlotter | refs/heads/master | pyqtgraph/graphicsItems/ItemGroup.py | 53 | from ..Qt import QtGui, QtCore
from .GraphicsObject import GraphicsObject
__all__ = ['ItemGroup']
class ItemGroup(GraphicsObject):
    """
    Minimal stand-in for QGraphicsItemGroup: an invisible item whose only
    job is to act as a common parent for other graphics items.
    """
    def __init__(self, *args):
        super(ItemGroup, self).__init__(*args)
        # Some Qt bindings expose ItemHasNoContents; when present, set it so
        # the scene never asks this (empty) item to paint itself.
        no_contents = getattr(self, "ItemHasNoContents", None)
        if no_contents is not None:
            self.setFlag(no_contents)
    def boundingRect(self):
        # The group itself occupies no space; children report their own rects.
        return QtCore.QRectF()
    def paint(self, *args):
        # Nothing to draw -- children paint themselves.
        pass
    def addItem(self, item):
        # Re-parenting is all that is needed to "add" an item to this group.
        item.setParentItem(self)
|
aifil/odoo | refs/heads/8.0 | addons/website_hr/models/__init__.py | 439 | import hr
|
selwin/Django-facebook | refs/heads/master | facebook_example/manage.py | 27 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the example project's settings unless the caller has
    # already configured DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault(
        "DJANGO_SETTINGS_MODULE", "facebook_example.settings")
    from django.core.management import execute_from_command_line
    # Dispatch to Django's management commands (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
|
monetizeio/django-pgmp | refs/heads/master | django_pgmp/db/fields/__init__.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# === django_pgmp.db.fields -----------------------------------------------===
# This file is part of django-pgpm. django-pgpm is copyright © 2012, RokuSigma
# Inc. and contributors. See AUTHORS and LICENSE for more details.
#
# django-pgpm is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# django-pgpm is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with django-pgpm. If not, see <http://www.gnu.org/licenses/>.
# ===----------------------------------------------------------------------===
from .mpq import MultiPrecisionFractionField
from .mpz import MultiPrecisionIntegerField
# ===----------------------------------------------------------------------===
# End of File
# ===----------------------------------------------------------------------===
|
gaolichuang/neutron-fwaas | refs/heads/master | neutron_fwaas/db/cisco/__init__.py | 12133432 | |
alexgorban/models | refs/heads/master | official/vision/detection/dataloader/maskrcnn_parser.py | 1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data parser and processing for Mask R-CNN."""
import tensorflow.compat.v2 as tf
from official.vision.detection.dataloader import anchor
from official.vision.detection.dataloader import mode_keys as ModeKeys
from official.vision.detection.dataloader import tf_example_decoder
from official.vision.detection.utils import box_utils
from official.vision.detection.utils import dataloader_utils
from official.vision.detection.utils import input_utils
class Parser(object):
  """Parser to parse an image and its annotations into a dictionary of tensors."""

  def __init__(self,
               output_size,
               min_level,
               max_level,
               num_scales,
               aspect_ratios,
               anchor_size,
               rpn_match_threshold=0.7,
               rpn_unmatched_threshold=0.3,
               rpn_batch_size_per_im=256,
               rpn_fg_fraction=0.5,
               aug_rand_hflip=False,
               aug_scale_min=1.0,
               aug_scale_max=1.0,
               skip_crowd_during_training=True,
               max_num_instances=100,
               include_mask=False,
               mask_crop_size=112,
               use_bfloat16=True,
               mode=None):
    """Initializes parameters for parsing annotations in the dataset.

    Args:
      output_size: `Tensor` or `list` for [height, width] of output image. The
        output_size should be divided by the largest feature stride 2^max_level.
      min_level: `int` number of minimum level of the output feature pyramid.
      max_level: `int` number of maximum level of the output feature pyramid.
      num_scales: `int` number representing intermediate scales added
        on each level. For instances, num_scales=2 adds one additional
        intermediate anchor scales [2^0, 2^0.5] on each level.
      aspect_ratios: `list` of float numbers representing the aspect raito
        anchors added on each level. The number indicates the ratio of width to
        height. For instances, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors
        on each scale level.
      anchor_size: `float` number representing the scale of size of the base
        anchor to the feature stride 2^level.
      rpn_match_threshold: `float`, forwarded to `anchor.RpnAnchorLabeler`;
        presumably the IoU above which an anchor counts as a positive RPN
        match -- confirm in `anchor.py`.
      rpn_unmatched_threshold: `float`, forwarded to `anchor.RpnAnchorLabeler`;
        presumably the IoU below which an anchor counts as negative.
      rpn_batch_size_per_im: `int`, forwarded to `anchor.RpnAnchorLabeler`;
        presumably the number of anchors sampled per image for the RPN loss.
      rpn_fg_fraction: `float`, forwarded to `anchor.RpnAnchorLabeler`;
        presumably the fraction of sampled anchors that should be foreground.
      aug_rand_hflip: `bool`, if True, augment training with random
        horizontal flip.
      aug_scale_min: `float`, the minimum scale applied to `output_size` for
        data augmentation during training.
      aug_scale_max: `float`, the maximum scale applied to `output_size` for
        data augmentation during training.
      skip_crowd_during_training: `bool`, if True, skip annotations labeled with
        `is_crowd` equals to 1.
      max_num_instances: `int` number of maximum number of instances in an
        image. The groundtruth data will be padded to `max_num_instances`.
      include_mask: a bool to indicate whether parse mask groundtruth.
      mask_crop_size: the size which groundtruth mask is cropped to.
      use_bfloat16: `bool`, if True, cast output image to tf.bfloat16.
      mode: a ModeKeys. Specifies if this is training, evaluation, prediction
        or prediction with groundtruths in the outputs.
    """
    self._mode = mode
    self._max_num_instances = max_num_instances
    self._skip_crowd_during_training = skip_crowd_during_training
    self._is_training = (mode == ModeKeys.TRAIN)

    self._example_decoder = tf_example_decoder.TfExampleDecoder(
        include_mask=include_mask)

    # Anchor.
    self._output_size = output_size
    self._min_level = min_level
    self._max_level = max_level
    self._num_scales = num_scales
    self._aspect_ratios = aspect_ratios
    self._anchor_size = anchor_size

    # Target assigning.
    self._rpn_match_threshold = rpn_match_threshold
    self._rpn_unmatched_threshold = rpn_unmatched_threshold
    self._rpn_batch_size_per_im = rpn_batch_size_per_im
    self._rpn_fg_fraction = rpn_fg_fraction

    # Data augmentation.
    self._aug_rand_hflip = aug_rand_hflip
    self._aug_scale_min = aug_scale_min
    self._aug_scale_max = aug_scale_max

    # Mask.
    self._include_mask = include_mask
    self._mask_crop_size = mask_crop_size

    # Device.
    self._use_bfloat16 = use_bfloat16

    # Data is parsed depending on the model Modekey.
    if mode == ModeKeys.TRAIN:
      self._parse_fn = self._parse_train_data
    elif mode == ModeKeys.EVAL:
      self._parse_fn = self._parse_eval_data
    elif mode == ModeKeys.PREDICT or mode == ModeKeys.PREDICT_WITH_GT:
      self._parse_fn = self._parse_predict_data
    else:
      raise ValueError('mode is not defined.')

  def __call__(self, value):
    """Parses data to an image and associated training labels.

    Args:
      value: a string tensor holding a serialized tf.Example proto.

    Returns:
      inputs, labels: a tuple of dictionaries as produced by the mode-specific
        parse function (see `_parse_train_data` / `_parse_predict_data`).
    """
    with tf.name_scope('parser'):
      data = self._example_decoder.decode(value)
      return self._parse_fn(data)

  def _parse_train_data(self, data):
    """Parses data for training.

    Args:
      data: the decoded tensor dictionary from TfExampleDecoder.

    Returns:
      inputs: a dictionary of input tensors:
        image: image tensor that is preproessed to have normalized value and
          dimension [output_size[0], output_size[1], 3]
        image_info: a 2D `Tensor` that encodes the information of the image and
          the applied preprocessing. Based on how it is consumed below, its
          rows appear to be [[original_height, original_width],
          [scaled_height, scaled_width], [y_scale, x_scale],
          [y_offset, x_offset]] -- rows 2 and 3 are used as the resize scale
          and offset.
        gt_boxes: Groundtruth bounding box annotations. The box is represented
          in [y1, x1, y2, x2] format. The coordinates are w.r.t the scaled
          image that is fed to the network. The tennsor is padded with -1 to
          the fixed dimension [self._max_num_instances, 4].
        gt_classes: Groundtruth classes annotations. The tennsor is padded
          with -1 to the fixed dimension [self._max_num_instances].
        gt_masks: groundtrugh masks cropped by the bounding box and
          resized to a fixed size determined by mask_crop_size (only when
          include_mask is True).
      labels: a dictionary of tensors used for training:
        anchor_boxes: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor with
          shape [height_l, width_l, 4] representing anchor boxes at each level.
        image_info: same tensor as in `inputs` above.
        rpn_score_targets: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor with
          shape [height_l, width_l, anchors_per_location]. The height_l and
          width_l represent the dimension of class logits at l-th level.
        rpn_box_targets: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor with
          shape [height_l, width_l, anchors_per_location * 4]. The height_l and
          width_l represent the dimension of bounding box regression output at
          l-th level.
    """
    classes = data['groundtruth_classes']
    boxes = data['groundtruth_boxes']
    if self._include_mask:
      masks = data['groundtruth_instance_masks']

    is_crowds = data['groundtruth_is_crowd']
    # Skips annotations with `is_crowd` = True.
    if self._skip_crowd_during_training and self._is_training:
      num_groundtrtuhs = tf.shape(classes)[0]
      with tf.control_dependencies([num_groundtrtuhs, is_crowds]):
        indices = tf.cond(
            tf.greater(tf.size(is_crowds), 0),
            lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
            lambda: tf.cast(tf.range(num_groundtrtuhs), tf.int64))
      classes = tf.gather(classes, indices)
      boxes = tf.gather(boxes, indices)
      if self._include_mask:
        masks = tf.gather(masks, indices)

    # Gets original image and its size.
    image = data['image']
    image_shape = tf.shape(image)[0:2]

    # Normalizes image with mean and std pixel values.
    image = input_utils.normalize_image(image)

    # Flips image randomly during training.
    if self._aug_rand_hflip:
      if self._include_mask:
        image, boxes, masks = input_utils.random_horizontal_flip(
            image, boxes, masks)
      else:
        image, boxes = input_utils.random_horizontal_flip(
            image, boxes)

    # Converts boxes from normalized coordinates to pixel coordinates.
    # Now the coordinates of boxes are w.r.t. the original image.
    boxes = box_utils.denormalize_boxes(boxes, image_shape)

    # Resizes and crops image.
    image, image_info = input_utils.resize_and_crop_image(
        image,
        self._output_size,
        padded_size=input_utils.compute_padded_size(
            self._output_size, 2 ** self._max_level),
        aug_scale_min=self._aug_scale_min,
        aug_scale_max=self._aug_scale_max)
    image_height, image_width, _ = image.get_shape().as_list()

    # Resizes and crops boxes.
    # Now the coordinates of boxes are w.r.t the scaled image.
    image_scale = image_info[2, :]
    offset = image_info[3, :]
    boxes = input_utils.resize_and_crop_boxes(
        boxes, image_scale, image_info[1, :], offset)

    # Filters out ground truth boxes that are all zeros.
    indices = box_utils.get_non_empty_box_indices(boxes)
    boxes = tf.gather(boxes, indices)
    classes = tf.gather(classes, indices)
    if self._include_mask:
      masks = tf.gather(masks, indices)
      # Transfer boxes to the original image space and do normalization.
      cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
      cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
      cropped_boxes = box_utils.normalize_boxes(cropped_boxes, image_shape)
      # Crop each instance mask to its (normalized) box and resample to the
      # fixed mask training resolution.
      num_masks = tf.shape(masks)[0]
      masks = tf.image.crop_and_resize(
          tf.expand_dims(masks, axis=-1),
          cropped_boxes,
          box_indices=tf.range(num_masks, dtype=tf.int32),
          crop_size=[self._mask_crop_size, self._mask_crop_size],
          method='bilinear')
      masks = tf.squeeze(masks, axis=-1)

    # Assigns anchor targets.
    # Note that after the target assignment, box targets are absolute pixel
    # offsets w.r.t. the scaled image.
    input_anchor = anchor.Anchor(
        self._min_level,
        self._max_level,
        self._num_scales,
        self._aspect_ratios,
        self._anchor_size,
        (image_height, image_width))
    anchor_labeler = anchor.RpnAnchorLabeler(
        input_anchor,
        self._rpn_match_threshold,
        self._rpn_unmatched_threshold,
        self._rpn_batch_size_per_im,
        self._rpn_fg_fraction)
    rpn_score_targets, rpn_box_targets = anchor_labeler.label_anchors(
        boxes, tf.cast(tf.expand_dims(classes, axis=-1), dtype=tf.float32))

    # If bfloat16 is used, casts input image to tf.bfloat16.
    if self._use_bfloat16:
      image = tf.cast(image, dtype=tf.bfloat16)

    inputs = {
        'image': image,
        'image_info': image_info,
    }
    # Packs labels for model_fn outputs.
    labels = {
        'anchor_boxes': input_anchor.multilevel_boxes,
        'image_info': image_info,
        'rpn_score_targets': rpn_score_targets,
        'rpn_box_targets': rpn_box_targets,
    }
    # Groundtruth tensors are padded to a fixed size and packed into `inputs`.
    inputs['gt_boxes'] = input_utils.pad_to_fixed_size(boxes,
                                                       self._max_num_instances,
                                                       -1)
    inputs['gt_classes'] = input_utils.pad_to_fixed_size(
        classes, self._max_num_instances, -1)
    if self._include_mask:
      inputs['gt_masks'] = input_utils.pad_to_fixed_size(
          masks, self._max_num_instances, -1)

    return inputs, labels

  def _parse_eval_data(self, data):
    """Parses data for evaluation."""
    raise NotImplementedError('Not implemented!')

  def _parse_predict_data(self, data):
    """Parses data for prediction.

    Args:
      data: the decoded tensor dictionary from TfExampleDecoder.

    Returns:
      inputs, labels: a tuple of dictionaries, where
        inputs contains:
          image: image tensor that is preproessed to have normalized value and
            dimension [output_size[0], output_size[1], 3]
          image_info: a 2D `Tensor` that encodes the information of the image
            and the applied preprocessing (see `_parse_train_data` for the
            apparent row layout).
        labels contains:
          image_info: same tensor as in `inputs`.
          groundtruths: only when mode is PREDICT_WITH_GT -- the padded
            groundtruth annotations (source_id, boxes, classes, areas,
            is_crowds, ...).
    """
    # Gets original image and its size.
    image = data['image']
    image_shape = tf.shape(image)[0:2]

    # Normalizes image with mean and std pixel values.
    image = input_utils.normalize_image(image)

    # Resizes and crops image.
    image, image_info = input_utils.resize_and_crop_image(
        image,
        self._output_size,
        padded_size=input_utils.compute_padded_size(
            self._output_size, 2 ** self._max_level),
        aug_scale_min=1.0,
        aug_scale_max=1.0)
    image_height, image_width, _ = image.get_shape().as_list()

    # If bfloat16 is used, casts input image to tf.bfloat16.
    if self._use_bfloat16:
      image = tf.cast(image, dtype=tf.bfloat16)

    # Compute Anchor boxes.
    input_anchor = anchor.Anchor(
        self._min_level,
        self._max_level,
        self._num_scales,
        self._aspect_ratios,
        self._anchor_size,
        (image_height, image_width))

    labels = {
        'image_info': image_info,
    }

    if self._mode == ModeKeys.PREDICT_WITH_GT:
      # Converts boxes from normalized coordinates to pixel coordinates.
      boxes = box_utils.denormalize_boxes(
          data['groundtruth_boxes'], image_shape)
      groundtruths = {
          'source_id': data['source_id'],
          'height': data['height'],
          'width': data['width'],
          'num_detections': tf.shape(data['groundtruth_classes']),
          'boxes': boxes,
          'classes': data['groundtruth_classes'],
          'areas': data['groundtruth_area'],
          'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
      }
      groundtruths['source_id'] = dataloader_utils.process_source_id(
          groundtruths['source_id'])
      groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
          groundtruths, self._max_num_instances)
      # TODO(yeqing): Remove the `groundtrtuh` layer key (no longer needed).
      labels['groundtruths'] = groundtruths
    inputs = {
        'image': image,
        'image_info': image_info,
    }

    return inputs, labels
|
wilsonianb/nacl_contracts | refs/heads/master | pnacl/driver/artools.py | 4 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# IMPORTANT NOTE: If you make local mods to this file, you must run:
# % pnacl/build.sh driver
# in order for them to take effect in the scons build. This command
# updates the copy in the toolchain/ tree.
#
# Tool for reading archive (.a) files
# For information about the archive file format, see:
# http://en.wikipedia.org/wiki/Ar_(Unix)
import driver_log
import elftools
import pathtools
# See above link to wiki entry on archive format.
AR_MAGIC = '!<arch>\n'
# Thin archives are like normal archives except that there are only
# indirect references to each member (the data is not embedded).
# See manpage for a description of this.
THIN_MAGIC = '!<thin>\n'
# filetype.IsArchive calls this IsArchive. Top-level tools should prefer
# filetype.IsArchive, both for consistency (i.e., all checks for file type come
# from that library), and because its results are cached.
def IsArchive(filename):
  """Return True if |filename| begins with a (regular or thin) ar magic.

  Only the first len(AR_MAGIC) bytes are sniffed; AR_MAGIC and THIN_MAGIC
  have the same length, so a single read covers both cases.
  """
  fp = driver_log.DriverOpen(filename, "rb")
  try:
    magic = fp.read(len(AR_MAGIC))
  finally:
    # Close even when read() raises so the file handle is never leaked.
    fp.close()
  return magic in [AR_MAGIC, THIN_MAGIC]
def GetMemberFilename(member, strtab_data):
  """Return the real filename of an archive member.

  Short names are stored (space padded) directly in the member header.
  GNU-style long names are stored as '/<offset>', where <offset> indexes
  into the archive's string table (strtab_data); each table entry ends
  with '/' followed by a newline.
  """
  if member.is_long_name:
    # '/<offset>' -> look the name up in the string table.
    offset = int(member.name[1:].strip())
    entry = strtab_data[offset:].split('\n', 2)[0]
    assert (entry.endswith('/'))
    return entry[:-1]
  return member.name.strip()
def GetThinArchiveData(archive_filename, member, strtab_data):
  """Read a thin-archive member's data from its external file.

  Thin archives store only a reference to each member; the member's path is
  resolved relative to the directory containing the archive itself.
  Returns the first member.size bytes of the member file.
  """
  # Get member's filename (relative to the archive) and open the member
  # ourselves to check the data.
  member_filename = GetMemberFilename(member, strtab_data)
  member_filename = pathtools.join(
      pathtools.dirname(pathtools.abspath(archive_filename)),
      member_filename)
  member_fp = driver_log.DriverOpen(member_filename, 'rb')
  data = member_fp.read(member.size)
  member_fp.close()
  return data
def GetArchiveType(filename):
  """Classify an archive as bitcode or native.

  Scans members in order until one can be classified: a regular member whose
  data starts with 'BC' yields 'archive-bc'; a member with a decodable ELF
  header yields 'archive-<arch>'. Empty archives are treated as bitcode.
  Dies via Log.Fatal on malformed headers or if no member can be classified.

  NOTE(review): ar members are 2-byte aligned, but no pad byte is skipped
  between members here -- this appears to rely on classifying one of the
  first members before alignment drifts; confirm against real archives.
  """
  fp = driver_log.DriverOpen(filename, "rb")

  # Read the archive magic header
  magic = fp.read(len(AR_MAGIC))
  assert(magic in [AR_MAGIC, THIN_MAGIC])

  # Find a regular file or symbol table
  empty_file = True
  found_type = ''
  strtab_data = ''
  while not found_type:
    member = MemberHeader(fp)
    if member.error == 'EOF':
      break
    elif member.error:
      driver_log.Log.Fatal("%s: %s", filename, member.error)

    empty_file = False

    if member.is_regular_file:
      if not magic == THIN_MAGIC:
        data = fp.read(member.size)
      else:
        # For thin archives, do not read the data section.
        # We instead must get at the member indirectly.
        data = GetThinArchiveData(filename, member, strtab_data)
      if data.startswith('BC'):
        found_type = 'archive-bc'
      else:
        elf_header = elftools.DecodeELFHeader(data, filename)
        if elf_header:
          found_type = 'archive-%s' % elf_header.arch
    elif member.is_strtab:
      # We need the strtab data to get long filenames.
      data = fp.read(member.size)
      strtab_data = data
      continue
    else:
      # Other symbol tables we can just skip ahead.
      data = fp.read(member.size)
      continue

  if empty_file:
    # Empty archives are treated as bitcode ones.
    found_type = 'archive-bc'
  elif not found_type:
    driver_log.Log.Fatal("%s: Unable to determine archive type", filename)

  fp.close()
  return found_type
class MemberHeader(object):
  """Parse one fixed-size (60 byte) archive member header from |fp|.

  On failure, |error| is a non-empty string ('EOF' cleanly marks the end of
  the archive) and the remaining attributes may be only partially set.  On
  success |error| is '', |size| is an int, and the classification flags
  (is_regular_file, is_strtab, the symtab flags, is_long_name) are valid.
  """
  def __init__(self, fp):
    self.error = ''
    raw = fp.read(60)
    if len(raw) == 0:
      self.error = "EOF"
      return
    if len(raw) != 60:
      self.error = 'Short count reading archive member header'
      return

    # Fixed-offset fields: name (16 bytes), size (10 bytes), magic (2 bytes).
    self.name = raw[0:16]
    self.size = raw[48:48 + 10]
    self.fmag = raw[58:60]

    if self.fmag != '`\n':
      self.error = 'Invalid archive member header magic string %s' % raw
      return

    self.size = int(self.size)

    # Classify the member: symbol/string tables use reserved names.
    self.is_svr4_symtab = self.name == '/ '
    self.is_llvm_symtab = self.name == '#_LLVM_SYM_TAB_#'
    self.is_bsd4_symtab = self.name == '__.SYMDEF SORTED'
    self.is_strtab = self.name == '// '
    self.is_regular_file = not any([self.is_svr4_symtab,
                                    self.is_llvm_symtab,
                                    self.is_bsd4_symtab,
                                    self.is_strtab])

    # BSD style long names (not supported)
    if self.name.startswith('#1/'):
      self.error = "BSD-style long file names not supported"
      return

    # GNU long filenames look like '/<offset>'; thin archives use these.
    self.is_long_name = self.is_regular_file and self.name.startswith('/')

    if self.is_regular_file and not self.is_long_name:
      # Filenames end with '/' and are padded with spaces up to 16 bytes
      self.name = self.name.strip()[:-1]
|
tonybaloney/st2 | refs/heads/master | st2tests/st2tests/fixturesloader.py | 5 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import six
from st2common.content.loader import MetaLoader
from st2common.models.api.action import (ActionAPI, LiveActionAPI, ActionExecutionStateAPI,
RunnerTypeAPI, ActionAliasAPI)
from st2common.models.api.auth import ApiKeyAPI, UserAPI
from st2common.models.api.execution import (ActionExecutionAPI)
from st2common.models.api.policy import (PolicyTypeAPI, PolicyAPI)
from st2common.models.api.rule import (RuleAPI)
from st2common.models.api.rule_enforcement import RuleEnforcementAPI
from st2common.models.api.sensor import SensorTypeAPI
from st2common.models.api.trace import TraceAPI
from st2common.models.api.trigger import (TriggerAPI, TriggerTypeAPI, TriggerInstanceAPI)
from st2common.models.db.action import ActionDB
from st2common.models.db.actionalias import ActionAliasDB
from st2common.models.db.auth import ApiKeyDB, UserDB
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.executionstate import ActionExecutionStateDB
from st2common.models.db.runner import RunnerTypeDB
from st2common.models.db.execution import (ActionExecutionDB)
from st2common.models.db.policy import (PolicyTypeDB, PolicyDB)
from st2common.models.db.rule import RuleDB
from st2common.models.db.rule_enforcement import RuleEnforcementDB
from st2common.models.db.sensor import SensorTypeDB
from st2common.models.db.trace import TraceDB
from st2common.models.db.trigger import (TriggerDB, TriggerTypeDB, TriggerInstanceDB)
from st2common.persistence.action import Action
from st2common.persistence.actionalias import ActionAlias
from st2common.persistence.execution import ActionExecution
from st2common.persistence.executionstate import ActionExecutionState
from st2common.persistence.auth import ApiKey, User
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.runner import RunnerType
from st2common.persistence.policy import (PolicyType, Policy)
from st2common.persistence.rule import Rule
from st2common.persistence.rule_enforcement import RuleEnforcement
from st2common.persistence.sensor import SensorType
from st2common.persistence.trace import Trace
from st2common.persistence.trigger import (Trigger, TriggerType, TriggerInstance)
# Fixture types that have a corresponding database model and can therefore be
# loaded into (or deleted from) the database.
ALLOWED_DB_FIXTURES = ['actions', 'actionstates', 'aliases', 'executions', 'liveactions',
                       'policies', 'policytypes', 'rules', 'runners', 'sensors',
                       'triggertypes', 'triggers', 'triggerinstances', 'traces', 'apikeys',
                       'users', 'enforcements']
# All supported fixture types; 'actionchains' and 'workflows' are file-only
# fixtures with no DB model.
ALLOWED_FIXTURES = copy.copy(ALLOWED_DB_FIXTURES)
ALLOWED_FIXTURES.extend(['actionchains', 'workflows'])
# Maps fixture type -> database (document) model class.
FIXTURE_DB_MODEL = {
    'actions': ActionDB,
    'aliases': ActionAliasDB,
    'actionstates': ActionExecutionStateDB,
    'apikeys': ApiKeyDB,
    'enforcements': RuleEnforcementDB,
    'executions': ActionExecutionDB,
    'liveactions': LiveActionDB,
    'policies': PolicyDB,
    'policytypes': PolicyTypeDB,
    'rules': RuleDB,
    'runners': RunnerTypeDB,
    'sensors': SensorTypeDB,
    'traces': TraceDB,
    'triggertypes': TriggerTypeDB,
    'triggers': TriggerDB,
    'triggerinstances': TriggerInstanceDB,
    'users': UserDB
}
# Maps fixture type -> API (serialization/validation) model class used to
# parse fixture files before converting them to DB models.
FIXTURE_API_MODEL = {
    'actions': ActionAPI,
    'aliases': ActionAliasAPI,
    'actionstates': ActionExecutionStateAPI,
    'apikeys': ApiKeyAPI,
    'enforcements': RuleEnforcementAPI,
    'executions': ActionExecutionAPI,
    'liveactions': LiveActionAPI,
    'policies': PolicyAPI,
    'policytypes': PolicyTypeAPI,
    'rules': RuleAPI,
    'runners': RunnerTypeAPI,
    'sensors': SensorTypeAPI,
    'traces': TraceAPI,
    'triggertypes': TriggerTypeAPI,
    'triggers': TriggerAPI,
    'triggerinstances': TriggerInstanceAPI,
    'users': UserAPI
}
# Maps fixture type -> persistence (data access) class used for DB reads,
# writes and deletes.
FIXTURE_PERSISTENCE_MODEL = {
    'actions': Action,
    'aliases': ActionAlias,
    'actionstates': ActionExecutionState,
    'apikeys': ApiKey,
    'enforcements': RuleEnforcement,
    'executions': ActionExecution,
    'liveactions': LiveAction,
    'policies': Policy,
    'policytypes': PolicyType,
    'rules': Rule,
    'runners': RunnerType,
    'sensors': SensorType,
    'traces': Trace,
    'triggertypes': TriggerType,
    'triggers': Trigger,
    'triggerinstances': TriggerInstance,
    'users': User
}
def get_fixtures_base_path():
    """Return the path of the ``fixtures`` directory bundled with this package."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'fixtures')
def get_fixtures_packs_base_path():
    """Return the path of the ``fixtures/packs`` directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'fixtures/packs')
def get_fixtures_runners_base_path():
    """Return the path of the ``fixtures/packs/runners/`` directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'fixtures/packs/runners/')
def get_resources_base_path():
    """Return the path of the ``resources`` directory bundled with this package."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'resources')
class FixturesLoader(object):
    def __init__(self):
        # Loader used to parse YAML/JSON fixture files from disk.
        self.meta_loader = MetaLoader()
def save_fixtures_to_db(self, fixtures_pack='generic', fixtures_dict=None):
"""
Loads fixtures specified in fixtures_dict into the database
and returns DB models for the fixtures.
fixtures_dict should be of the form:
{
'actions': ['action-1.yaml', 'action-2.yaml'],
'rules': ['rule-1.yaml'],
'liveactions': ['execution-1.yaml']
}
:param fixtures_pack: Name of the pack to load fixtures from.
:type fixtures_pack: ``str``
:param fixtures_dict: Dictionary specifying the fixtures to load for each type.
:type fixtures_dict: ``dict``
:rtype: ``dict``
"""
if fixtures_dict is None:
fixtures_dict = {}
fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
self._validate_fixture_dict(fixtures_dict, allowed=ALLOWED_DB_FIXTURES)
db_models = {}
for fixture_type, fixtures in six.iteritems(fixtures_dict):
API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(fixture_type, None)
loaded_fixtures = {}
for fixture in fixtures:
# Guard against copy and type and similar typos
if fixture in loaded_fixtures:
msg = 'Fixture "%s" is specified twice, probably a typo.' % (fixture)
raise ValueError(msg)
fixture_dict = self.meta_loader.load(
self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
api_model = API_MODEL(**fixture_dict)
db_model = API_MODEL.to_model(api_model)
db_model = PERSISTENCE_MODEL.add_or_update(db_model)
loaded_fixtures[fixture] = db_model
db_models[fixture_type] = loaded_fixtures
return db_models
def load_fixtures(self, fixtures_pack='generic', fixtures_dict=None):
"""
Loads fixtures specified in fixtures_dict. We
simply want to load the meta into dict objects.
fixtures_dict should be of the form:
{
'actionchains': ['actionchain1.yaml', 'actionchain2.yaml'],
'workflows': ['workflow.yaml']
}
:param fixtures_pack: Name of the pack to load fixtures from.
:type fixtures_pack: ``str``
:param fixtures_dict: Dictionary specifying the fixtures to load for each type.
:type fixtures_dict: ``dict``
:rtype: ``dict``
"""
if not fixtures_dict:
return {}
fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
self._validate_fixture_dict(fixtures_dict)
all_fixtures = {}
for fixture_type, fixtures in six.iteritems(fixtures_dict):
loaded_fixtures = {}
for fixture in fixtures:
fixture_dict = self.meta_loader.load(
self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
loaded_fixtures[fixture] = fixture_dict
all_fixtures[fixture_type] = loaded_fixtures
return all_fixtures
def load_models(self, fixtures_pack='generic', fixtures_dict=None):
"""
Loads fixtures specified in fixtures_dict as db models. This method must be
used for fixtures that have associated DB models. We simply want to load the
meta as DB models but don't want to save them to db.
fixtures_dict should be of the form:
{
'actions': ['action-1.yaml', 'action-2.yaml'],
'rules': ['rule-1.yaml'],
'liveactions': ['execution-1.yaml']
}
:param fixtures_pack: Name of the pack to load fixtures from.
:type fixtures_pack: ``str``
:param fixtures_dict: Dictionary specifying the fixtures to load for each type.
:type fixtures_dict: ``dict``
:rtype: ``dict``
"""
if not fixtures_dict:
return {}
fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
self._validate_fixture_dict(fixtures_dict, allowed=ALLOWED_DB_FIXTURES)
all_fixtures = {}
for fixture_type, fixtures in six.iteritems(fixtures_dict):
API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
loaded_models = {}
for fixture in fixtures:
fixture_dict = self.meta_loader.load(
self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
api_model = API_MODEL(**fixture_dict)
db_model = API_MODEL.to_model(api_model)
loaded_models[fixture] = db_model
all_fixtures[fixture_type] = loaded_models
return all_fixtures
def delete_fixtures_from_db(self, fixtures_pack='generic', fixtures_dict=None,
                            raise_on_fail=False):
    """
    Deletes fixtures specified in fixtures_dict from the database.

    fixtures_dict should be of the form:
    {
        'actions': ['action-1.yaml', 'action-2.yaml'],
        'rules': ['rule-1.yaml'],
        'liveactions': ['execution-1.yaml']
    }

    :param fixtures_pack: Name of the pack to delete fixtures from.
    :type fixtures_pack: ``str``

    :param fixtures_dict: Dictionary specifying the fixtures to delete for each type.
    :type fixtures_dict: ``dict``

    :param raise_on_fail: Optional If True, raises exception if delete fails on any fixture.
    :type raise_on_fail: ``boolean``
    """
    if not fixtures_dict:
        return
    fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
    self._validate_fixture_dict(fixtures_dict)
    for fixture_type, fixtures in six.iteritems(fixtures_dict):
        API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
        PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(fixture_type, None)
        for fixture in fixtures:
            fixture_dict = self.meta_loader.load(
                self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
            # Note that when we have a reference mechanism consistent for
            # every model, we can just do a get and delete the object. Until
            # then, this model conversions are necessary.
            api_model = API_MODEL(**fixture_dict)
            db_model = API_MODEL.to_model(api_model)
            try:
                PERSISTENCE_MODEL.delete(db_model)
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt and
                # SystemExit are not swallowed; deletes remain best-effort
                # unless the caller asked to fail fast.
                if raise_on_fail:
                    raise
def delete_models_from_db(self, models_dict, raise_on_fail=False):
    """
    Deletes models specified in models_dict from the database.

    models_dict should be of the form:
    {
        'actions': [ACTION1, ACTION2],
        'rules': [RULE1],
        'liveactions': [EXECUTION]
    }

    :param models_dict: Dictionary specifying the models to delete for each type.
    :type models_dict: ``dict``

    :param raise_on_fail: Optional If True, raises exception if delete fails on any model.
    :type raise_on_fail: ``boolean``
    """
    for model_type, models in six.iteritems(models_dict):
        PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(model_type, None)
        for model in models:
            try:
                PERSISTENCE_MODEL.delete(model)
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt and
                # SystemExit are not swallowed; deletes remain best-effort
                # unless the caller asked to fail fast.
                if raise_on_fail:
                    raise
def _validate_fixtures_pack(self, fixtures_pack):
    """Resolve *fixtures_pack* to its path, raising if it does not exist."""
    pack_path = self._get_fixtures_pack_path(fixtures_pack)
    if self._is_fixture_pack_exists(pack_path):
        return pack_path
    raise Exception('Fixtures pack not found ' +
                    'in fixtures path %s.' % get_fixtures_base_path())
def _validate_fixture_dict(self, fixtures_dict, allowed=ALLOWED_FIXTURES):
    """Raise if any fixture-type key in *fixtures_dict* is not permitted."""
    for fixture_type in fixtures_dict.keys():
        if fixture_type not in allowed:
            raise Exception('Disallowed fixture type: %s' % fixture_type)
def _is_fixture_pack_exists(self, fixtures_pack_path):
return os.path.exists(fixtures_pack_path)
def _get_fixture_file_path_abs(self, fixtures_pack_path, fixtures_type, fixture_name):
return os.path.join(fixtures_pack_path, fixtures_type, fixture_name)
def _get_fixtures_pack_path(self, fixtures_pack_name):
    """Path of the named fixtures pack under the fixtures base directory."""
    base_path = get_fixtures_base_path()
    return os.path.join(base_path, fixtures_pack_name)
def get_fixture_file_path_abs(self, fixtures_pack, fixtures_type, fixture_name):
    """Absolute path of a fixture file, resolved from the fixtures base path."""
    base_path = get_fixtures_base_path()
    return os.path.join(base_path, fixtures_pack, fixtures_type, fixture_name)
|
Oweoqi/metagoofil | refs/heads/master | hachoir_core/field/byte_field.py | 95 | """
Very basic field: raw content with a size in byte. Use this class for
unknown content.
"""
from hachoir_core.field import Field, FieldError
from hachoir_core.tools import makePrintable
from hachoir_core.bits import str2hex
from hachoir_core import config
MAX_LENGTH = (2**64)
class RawBytes(Field):
    """
    Byte vector of unknown content

    @see: L{Bytes}
    """
    # Size in bits is length (args[1]) * 8; usable without an instance.
    static_size = staticmethod(lambda *args, **kw: args[1]*8)

    def __init__(self, parent, name, length, description="Raw data"):
        """Create a raw-bytes field of *length* bytes under *parent*.

        Raises FieldError if length is not in (0, MAX_LENGTH].
        """
        assert issubclass(parent.__class__, Field)
        if not(0 < length <= MAX_LENGTH):
            raise FieldError("Invalid RawBytes length (%s)!" % length)
        Field.__init__(self, parent, name, length*8, description)
        # Cache of the (possibly truncated) bytes used for display purposes.
        self._display = None

    def _createDisplay(self, human):
        """Build the display string; printable text if *human*, else hex."""
        # Show at most config.max_byte_length bytes.
        max_bytes = config.max_byte_length
        if type(self._getValue) is type(lambda: None):
            # _getValue is still a plain function, so reading self.value is
            # presumably cheap here -- slice the value directly.
            display = self.value[:max_bytes]
        else:
            # Otherwise read (and cache) a truncated prefix from the stream.
            if self._display is None:
                address = self.absolute_address
                length = min(self._size / 8, max_bytes)
                self._display = self._parent.stream.readBytes(address, length)
            display = self._display
        # True when the preview shows fewer bytes than the field holds.
        truncated = (8 * len(display) < self._size)
        if human:
            if truncated:
                display += "(...)"
            return makePrintable(display, "latin-1", quote='"', to_unicode=True)
        else:
            display = str2hex(display, format=r"\x%02x")
            if truncated:
                return '"%s(...)"' % display
            else:
                return '"%s"' % display

    def createDisplay(self):
        # Human-readable (printable latin-1) preview.
        return self._createDisplay(True)

    def createRawDisplay(self):
        # Hex-escaped preview.
        return self._createDisplay(False)

    def hasValue(self):
        return True

    def createValue(self):
        """Read and return the field's full byte content from the stream."""
        assert (self._size % 8) == 0
        # Drop the cached display prefix; the full value supersedes it.
        if self._display:
            self._display = None
        return self._parent.stream.readBytes(
            self.absolute_address, self._size / 8)
class Bytes(RawBytes):
    """
    Byte vector: can be used for magic number or GUID/UUID for example.

    @see: L{RawBytes}
    """
    # Behaves exactly like RawBytes; the distinct type documents intent.
    pass
|
jgcaaprom/android_external_chromium_org | refs/heads/cm-12.1 | tools/perf/metrics/loading.py | 47 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import Metric
from telemetry.value import scalar
class LoadingMetric(Metric):
  """A metric for page loading time based entirely on window.performance"""

  # Markers reported as offsets from navigationStart, in milliseconds:
  # (result name, window.performance.timing key, important).
  _START_MARKERS = (
      ('load_start', 'loadEventStart', True),
      ('dom_content_loaded_start', 'domContentLoadedEventStart', True),
      ('fetch_start', 'fetchStart', False),
      ('request_start', 'requestStart', False),
  )

  # Phase durations in milliseconds:
  # (result name, end timing key, start timing key).  All are non-important.
  _PHASE_DURATIONS = (
      ('domain_lookup_duration', 'domainLookupEnd', 'domainLookupStart'),
      ('connect_duration', 'connectEnd', 'connectStart'),
      ('request_duration', 'responseStart', 'requestStart'),
      ('response_duration', 'responseEnd', 'responseStart'),
  )

  def Start(self, page, tab):
    raise NotImplementedError()

  def Stop(self, page, tab):
    raise NotImplementedError()

  def AddResults(self, tab, results):
    """Add navigation-relative and phase-duration values for the page.

    Reads window.performance.timing from the tab and reports each marker
    and phase listed in the tables above as a ScalarValue in milliseconds.
    """
    load_timings = tab.EvaluateJavaScript('window.performance.timing')
    nav_start = load_timings['navigationStart']

    # NavigationStart relative markers in milliseconds.
    for name, key, important in self._START_MARKERS:
      value = float(load_timings[key]) - nav_start
      if important:
        # Important values omit the kwarg so the ScalarValue default applies,
        # matching the original per-metric calls exactly.
        results.AddValue(scalar.ScalarValue(
            results.current_page, name, 'ms', value))
      else:
        results.AddValue(scalar.ScalarValue(
            results.current_page, name, 'ms', value, important=False))

    # Phase measurements in milliseconds.
    for name, end_key, start_key in self._PHASE_DURATIONS:
      duration = float(load_timings[end_key]) - load_timings[start_key]
      results.AddValue(scalar.ScalarValue(
          results.current_page, name, 'ms', duration, important=False))
|
deisi/home-assistant | refs/heads/master | homeassistant/components/isy994.py | 4 | """
Support the ISY-994 controllers.
For configuration details please visit the documentation for this component at
https://home-assistant.io/components/isy994/
"""
import logging
from urllib.parse import urlparse
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import validate_config, discovery
from homeassistant.helpers.entity import ToggleEntity
DOMAIN = "isy994"
REQUIREMENTS = ['PyISY==1.0.6']
# Module-level handle to the PyISY controller; assigned in setup().
ISY = None
# Substring marking a node as a sensor; overridable via 'sensor_string' config.
SENSOR_STRING = 'Sensor'
# Substring that hides a node from UIs; overridable via 'hidden_string' config.
HIDDEN_STRING = '{HIDE ME}'
CONF_TLS_VER = 'tls'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
    """Setup ISY994 component.

    This will automatically import associated lights, switches, and sensors.

    Returns True on success; False when configuration is invalid or the
    controller connection fails.
    """
    import PyISY
    # pylint: disable=global-statement
    # check for required values in configuration file
    if not validate_config(config,
                           {DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
                           _LOGGER):
        return False

    # Pull and parse standard configuration.
    user = config[DOMAIN][CONF_USERNAME]
    password = config[DOMAIN][CONF_PASSWORD]
    host = urlparse(config[DOMAIN][CONF_HOST])
    addr = host.geturl()
    if host.scheme == 'http':
        addr = addr.replace('http://', '')
        https = False
    elif host.scheme == 'https':
        addr = addr.replace('https://', '')
        https = True
    else:
        _LOGGER.error('isy994 host value in configuration file is invalid.')
        return False
    port = host.port
    # Strip the port suffix: PyISY takes host and port as separate arguments.
    addr = addr.replace(':{}'.format(port), '')

    # Pull and parse optional configuration.
    global SENSOR_STRING
    global HIDDEN_STRING
    SENSOR_STRING = str(config[DOMAIN].get('sensor_string', SENSOR_STRING))
    HIDDEN_STRING = str(config[DOMAIN].get('hidden_string', HIDDEN_STRING))
    tls_version = config[DOMAIN].get(CONF_TLS_VER, None)

    # Connect to ISY controller.
    global ISY
    ISY = PyISY.ISY(addr, port, user, password, use_https=https,
                    tls_ver=tls_version, log=_LOGGER)
    if not ISY.connected:
        return False

    # Listen for HA stop to disconnect.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)

    # Load platforms for the devices in the ISY controller that we support.
    for component in ('sensor', 'light', 'switch'):
        discovery.load_platform(hass, component, DOMAIN, {}, config)

    # Start receiving push updates from the controller's event stream.
    ISY.auto_update = True
    return True
def stop(event):
    """Stop the ISY event-stream subscription when Home Assistant shuts down."""
    ISY.auto_update = False
class ISYDeviceABC(ToggleEntity):
    """An abstract Class for an ISY device."""

    # Mapping of state-attribute name -> property name to read it from.
    _attrs = {}
    # Names of kwargs forwarded positionally to node.on().
    _onattrs = []
    # Optional (on_state, off_state) pair overriding the raw value.
    _states = []
    _dtype = None
    _domain = None
    _name = None

    def __init__(self, node):
        """Initialize the device and subscribe to its status changes."""
        # setup properties
        self.node = node

        # track changes
        self._change_handler = self.node.status. \
            subscribe('changed', self.on_update)

    def __del__(self):
        """Cleanup subscriptions because it is the right thing to do."""
        self._change_handler.unsubscribe()

    @property
    def domain(self):
        """Return the domain of the entity."""
        return self._domain

    @property
    def dtype(self):
        """Return the data type of the entity (binary or analog)."""
        if self._dtype in ['analog', 'binary']:
            return self._dtype
        return 'binary' if self.unit_of_measurement is None else 'analog'

    @property
    def should_poll(self):
        """No polling needed; the ISY event stream pushes updates."""
        return False

    @property
    def value(self):
        """Return the unclean value from the controller."""
        # pylint: disable=protected-access
        return self.node.status._val

    @property
    def state_attributes(self):
        """Return the state attributes for the node."""
        attr = {}
        for name, prop in self._attrs.items():
            attr[name] = getattr(self, prop)
        attr = self._attr_filter(attr)
        return attr

    def _attr_filter(self, attr):
        """A Placeholder for attribute filters."""
        # pylint: disable=no-self-use
        return attr

    @property
    def unique_id(self):
        """Return the ID of this ISY sensor."""
        # pylint: disable=protected-access
        return self.node._id

    @property
    def raw_name(self):
        """Return the unclean node name."""
        return str(self._name) \
            if self._name is not None else str(self.node.name)

    @property
    def name(self):
        """Return the cleaned name of the node."""
        return self.raw_name.replace(HIDDEN_STRING, '').strip() \
            .replace('_', ' ')

    @property
    def hidden(self):
        """Suggestion if the entity should be hidden from UIs."""
        return HIDDEN_STRING in self.raw_name

    def update(self):
        """Update state of the sensor."""
        # ISY objects are automatically updated by the ISY's event stream
        pass

    def on_update(self, event):
        """Handle the update received event."""
        self.update_ha_state()

    @property
    def is_on(self):
        """Return a boolean response if the node is on."""
        return bool(self.value)

    @property
    def is_open(self):
        """Return boolean response if the node is open. On = Open."""
        return self.is_on

    @property
    def state(self):
        """Return the state of the node."""
        if len(self._states) > 0:
            return self._states[0] if self.is_on else self._states[1]
        return self.value

    def turn_on(self, **kwargs):
        """Turn the device on."""
        # Bugfix: compare with != rather than `is not`; identity comparison
        # against a string literal only worked by CPython interning accident.
        if self.domain != 'sensor':
            attrs = [kwargs.get(name) for name in self._onattrs]
            self.node.on(*attrs)
        else:
            _LOGGER.error('ISY cannot turn on sensors.')

    def turn_off(self, **kwargs):
        """Turn the device off."""
        if self.domain != 'sensor':
            self.node.off()
        else:
            _LOGGER.error('ISY cannot turn off sensors.')

    @property
    def unit_of_measurement(self):
        """Return the defined units of measurement or None."""
        try:
            return self.node.units
        except AttributeError:
            return None
|
cobalys/django | refs/heads/master | tests/regressiontests/bug8245/admin.py | 150 | from __future__ import absolute_import
from django.contrib import admin
from .models import Story
admin.site.register(Story)
raise Exception("Bad admin module")
|
ericlink/adms-server | refs/heads/master | playframework-dist/play-1.1/python/Lib/UserDict.py | 2 | """A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
    """Wrapper around a real dict (held in ``self.data``) that subclasses can
    override safely; all operations delegate to the wrapped mapping."""

    def __init__(self, dict=None, **kwargs):
        # The wrapped mapping every method operates on.
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)
    def __repr__(self): return repr(self.data)
    def __cmp__(self, dict):
        # Compare underlying dicts so UserDict==UserDict and UserDict==dict
        # both work.
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)
    def __len__(self): return len(self.data)
    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Mirror dict's __missing__ hook for subclasses that define it.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)
    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()
    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        # For subclasses: detach self.data while copy.copy() runs so the copy
        # does not share the dict, then repopulate it explicitly via update().
        data = self.data
        try:
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c
    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return self.data.has_key(key)
    def update(self, dict=None, **kwargs):
        # Accept another UserDict, a plain dict, an items()-less iterable of
        # pairs, or any mapping with items().
        if dict is None:
            pass
        elif isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
            self.data.update(dict)
        else:
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            self.data.update(kwargs)
    def get(self, key, failobj=None):
        if not self.has_key(key):
            return failobj
        return self[key]
    def setdefault(self, key, failobj=None):
        if not self.has_key(key):
            self[key] = failobj
        return self[key]
    def pop(self, key, *args):
        return self.data.pop(key, *args)
    def popitem(self):
        return self.data.popitem()
    def __contains__(self, key):
        return key in self.data
    @classmethod
    def fromkeys(cls, iterable, value=None):
        # Alternate constructor: one key per element, all mapped to value.
        d = cls()
        for key in iterable:
            d[key] = value
        return d
class IterableUserDict(UserDict):
    """UserDict that also supports iteration over its keys."""
    def __iter__(self):
        return iter(self.data)
class DictMixin:
    """Mixin that derives the full mapping interface from the four base
    methods (__getitem__, __setitem__, __delitem__, keys) the subclass
    provides."""
    # Mixin defining all dictionary methods for classes that already have
    # a minimum dictionary interface including getitem, setitem, delitem,
    # and keys. Without knowledge of the subclass constructor, the mixin
    # does not define __init__() or copy(). In addition to the four base
    # methods, progressively more efficiency comes with defining
    # __contains__(), __iter__(), and iteritems().

    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k
    def has_key(self, key):
        # EAFP: probe the key and translate KeyError into False.
        try:
            value = self[key]
        except KeyError:
            return False
        return True
    def __contains__(self, key):
        return self.has_key(key)

    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])
    def iterkeys(self):
        return self.__iter__()

    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v
    def values(self):
        return [v for _, v in self.iteritems()]
    def items(self):
        return list(self.iteritems())
    def clear(self):
        for key in self.keys():
            del self[key]
    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
    def pop(self, key, *args):
        # *args carries the optional default; more than one extra arg is an
        # error, matching dict.pop's signature.
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got "\
                              + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value
    def popitem(self):
        # Take an arbitrary (key, value) pair; empty mapping raises KeyError.
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)
    def update(self, other=None, **kwargs):
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'):  # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            for k, v in other:
                self[k] = v
        if kwargs:
            self.update(kwargs)
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def __repr__(self):
        return repr(dict(self.iteritems()))
    def __cmp__(self, other):
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)
    def __len__(self):
        return len(self.keys())
|
flabe81/flask-admin | refs/heads/master | examples/sqla-custom-filter/app.py | 13 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_admin.contrib import sqla
from flask_admin import Admin
# required for creating custom filters
from flask_admin.contrib.sqla.filters import BaseSQLAFilter, FilterEqual
# Create the Flask application.
app = Flask(__name__)

# Dummy secret key so sessions work; use a real random value in production.
app.config['SECRET_KEY'] = '123456790'

# In-memory-style SQLite database stored in a local file.
app.config['DATABASE_FILE'] = 'sample_db.sqlite'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE']
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Flask views
@app.route('/')
def index():
    """Landing page: a single link into the admin UI."""
    link = '<a href="/admin/">Click me to get to Admin!</a>'
    return link
# Create model
class User(db.Model):
    """Sample user record displayed and filtered in the admin interface."""

    id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.String(100))
    last_name = db.Column(db.String(100))
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120), unique=True)

    def __init__(self, first_name, last_name, username, email):
        self.first_name = first_name
        self.last_name = last_name
        self.username = username
        self.email = email

    # Required for admin interface. For python 3 please use __str__ instead.
    def __unicode__(self):
        return self.username
# Create custom filter class
class FilterLastNameBrown(BaseSQLAFilter):
    """Boolean filter: restrict rows by whether the column equals "Brown"."""

    def apply(self, query, value, alias=None):
        # '1' selects Browns; any other value excludes them.
        wants_brown = (value == '1')
        if wants_brown:
            return query.filter(self.column == "Brown")
        return query.filter(self.column != "Brown")

    def operation(self):
        return 'is Brown'
# Add custom filter and standard FilterEqual to ModelView
class UserAdmin(sqla.ModelView):
    """Admin view for User exposing two filters on the last-name column."""

    # each filter in the list is a filter operation (equals, not equals, etc)
    # filters with the same name will appear as operations under the same filter
    column_filters = [
        FilterEqual(User.last_name, 'Last Name'),
        FilterLastNameBrown(
            User.last_name, 'Last Name', options=(('1', 'Yes'), ('0', 'No'))
        )
    ]
# Wire up the admin interface and register the user list view.
admin = Admin(app, template_mode="bootstrap3")
admin.add_view(UserAdmin(User, db.session))
def build_sample_db():
    """Recreate the schema and seed it with three demo users."""
    db.drop_all()
    db.create_all()
    seed_users = [
        User("Paul", "Brown", "pbrown", "paul@gmail.com"),
        User("Luke", "Brown", "lbrown", "luke@gmail.com"),
        User("Serge", "Koval", "skoval", "serge@gmail.com"),
    ]
    db.session.add_all(seed_users)
    db.session.commit()
if __name__ == '__main__':
    # Rebuild the sample database on every start, then serve the app locally.
    build_sample_db()
    app.run(port=5000, debug=True)
|
supamii/libuv.qt | refs/heads/master | lib/gmock/gtest/test/gtest_test_utils.py | 674 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest

# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
  import subprocess
  _SUBPROCESS_MODULE_AVAILABLE = True
except:
  # Pre-2.4 Pythons lack subprocess; fall back to popen2 (used by Subprocess).
  import popen2
  _SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204

GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'

IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]

# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
# Private copy of the environment, handed to child processes by the tests.
environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets/unsets an environment variable to a given value.

  A value of None removes the variable from the private environment copy.
  """
  if value is None:
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409

# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
# Both flags default to the directory containing the running script.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv. This is idempotent.

  Resolution order for each flag: command line (--flag=value) beats the
  upper-cased environment variable, which beats the built-in default.
  Matching --flag=value entries are removed from argv in place.
  """
  # Suppresses the lint complaint about a global variable since we need it
  # here to maintain module-wide state.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return

  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]

    # The command line flag overrides the environment variable.
    i = 1  # Skips the program name.
    while i < len(argv):
      prefix = '--' + flag + '='
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        del argv[i]
        break
      else:
        # We don't increment i in case we just found a --gtest_* flag
        # and removed it from argv.
        i += 1
def GetFlag(flag):
  """Returns the value of the given flag ('source_dir' or 'build_dir')."""

  # In case GetFlag() is called before Main(), we always call
  # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
  # are parsed (the call is idempotent).
  _ParseAndStripGTestFlags(sys.argv)

  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""

  # Defaults to this script's own directory; see _flag_map.
  return os.path.abspath(GetFlag('source_dir'))


def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""

  # Defaults to this script's own directory; see _flag_map.
  return os.path.abspath(GetFlag('build_dir'))
# Lazily created scratch directory, removed automatically at exit.
_temp_dir = None

def _RemoveTempDir():
  # Best-effort cleanup of the temp directory (errors are ignored).
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)


def GetTempDir():
  """Returns a directory for temporary files, creating it on first use."""

  global _temp_dir
  if _temp_dir is None:
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """

  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  # Windows and Cygwin binaries carry an .exe suffix.
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'

  if not os.path.exists(path):
    message = (
        'Unable to find the test binary "%s". Please make sure to provide\n'
        'a path to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.' % path)
    # Python 2 print-to-stderr syntax; this module targets old interpreters.
    print >> sys.stderr, message
    sys.exit(1)

  return path
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """

  if os.name == 'nt':
    # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
    # the argument to exit() directly.
    return exit_code

  # On Unix, os.system() encodes the status; unpack it with the wait macros.
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
class Subprocess:
  """Runs a child process and records its combined output and exit outcome."""

  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Sygnal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """

    # The subprocess module is the preferrable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE

      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file obect for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest.keys():
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)

      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)

        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)

      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)

    # A negative return code means death by signal (subprocess convention).
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test."""

  # We must call _ParseAndStripGTestFlags() before calling
  # unittest.main().  Otherwise the latter will be confused by the
  # --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  if GTEST_OUTPUT_VAR_NAME in os.environ:
    del os.environ[GTEST_OUTPUT_VAR_NAME]

  _test_module.main()
|
CTSRD-SOAAP/chromium-42.0.2311.135 | refs/heads/master | tools/telemetry/telemetry/core/backends/chrome_inspector/inspector_websocket_unittest.py | 11 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.backends.chrome_inspector import inspector_websocket
from telemetry.core.backends.chrome_inspector import websocket
from telemetry.unittest_util import simple_mock
class FakeSocket(object):
  """A fake websocket that allows test to send random data."""

  def __init__(self, mock_timer):
    self._mock_timer = mock_timer
    self._responses = []
    self._timeout = None

  def AddResponse(self, response, time):
    """Queue *response* to be returned by recv() at mock time *time*."""
    if self._responses:
      assert self._responses[-1][1] < time, (
          'Current response is scheduled earlier than previous response.')
    self._responses.append((response, time))

  def recv(self):
    """Return the next queued response, simulating timeouts on the mock clock."""
    if not self._responses:
      raise Exception('No more recorded responses.')

    response, due_time = self._responses.pop(0)
    now = self._mock_timer.time()
    if self._timeout is not None and due_time - now > self._timeout:
      # The data would arrive after the deadline: advance the clock past the
      # deadline and raise, like a real socket timeout would.
      self._mock_timer.SetTime(now + self._timeout + 1)
      raise websocket.WebSocketTimeoutException()

    self._mock_timer.SetTime(due_time)
    return response

  def settimeout(self, timeout):
    self._timeout = timeout
def _ReraiseExceptionErrorHandler(_elapsed_time):
  # Error handler that re-raises the active exception so tests fail loudly;
  # the elapsed-time argument is intentionally ignored.
  raise
def _DoNothingHandler(_elapsed_time):
  # Handler that intentionally ignores its elapsed-time argument (no-op).
  pass
class InspectorWebsocketUnittest(unittest.TestCase):
  """Tests for InspectorWebsocket notification dispatch and domain handling.

  Time-dependent behavior is exercised deterministically by replacing the
  module clock with simple_mock.MockTimer and scripting socket responses
  through FakeSocket.
  """

  def setUp(self):
    # Patch inspector_websocket's clock with a controllable mock so each
    # test can script when socket responses arrive.
    self._mock_timer = simple_mock.MockTimer(inspector_websocket)

  def tearDown(self):
    self._mock_timer.Restore()

  def _MakeInspectorWithFakeSocket(self):
    """Returns (inspector, fake_socket) with the fake socket installed."""
    inspector = inspector_websocket.InspectorWebsocket(
        error_handler=_ReraiseExceptionErrorHandler)
    fake_socket = FakeSocket(self._mock_timer)
    # pylint: disable=protected-access
    inspector._socket = fake_socket
    return inspector, fake_socket

  def testDispatchNotification(self):
    inspector, fake_socket = self._MakeInspectorWithFakeSocket()
    results = []
    def OnTestEvent(result):
      results.append(result)

    inspector.RegisterDomain('Test', OnTestEvent, _DoNothingHandler)
    fake_socket.AddResponse('{"method": "Test.foo"}', 5)
    inspector.DispatchNotifications()
    self.assertEqual(1, len(results))
    self.assertEqual('Test.foo', results[0]['method'])

  def testDispatchNotificationTimedOut(self):
    inspector, fake_socket = self._MakeInspectorWithFakeSocket()
    results = []
    def OnTestEvent(result):
      results.append(result)

    inspector.RegisterDomain('Test', OnTestEvent, _DoNothingHandler)
    # The only response arrives after the 10-second timeout below, so no
    # event handler should fire.
    fake_socket.AddResponse('{"method": "Test.foo"}', 11)
    with self.assertRaises(
        websocket.WebSocketTimeoutException):
      inspector.DispatchNotifications(timeout=10)
    self.assertEqual(0, len(results))

  def testDispatchNotificationUntilDoneTimedOut2(self):
    inspector, fake_socket = self._MakeInspectorWithFakeSocket()
    results = []
    def OnTestEvent(result):
      results.append(result)

    inspector.RegisterDomain('Test', OnTestEvent, _DoNothingHandler)
    # The third call to socket.recv() will take 15 seconds without any data
    # received, hence the below call will raise a
    # DispatchNotificationsUntilDoneTimeoutException.
    fake_socket.AddResponse('{"method": "Test.foo"}', 10)
    fake_socket.AddResponse('{"method": "Test.bar"}', 20)
    fake_socket.AddResponse('{"method": "Test.baz"}', 35)
    with self.assertRaises(
        inspector_websocket.DispatchNotificationsUntilDoneTimeoutException):
      inspector.DispatchNotificationsUntilDone(12)
    self.assertEqual(2, len(results))

  def testDispatchNotificationsUntilDone(self):
    inspector, fake_socket = self._MakeInspectorWithFakeSocket()
    results = []
    def OnTestEvent(result):
      results.append(result)
      # Returning True stops further dispatching once three events arrived.
      return len(results) > 2

    inspector.RegisterDomain('Test', OnTestEvent, _DoNothingHandler)
    # Even though it takes 70 seconds to receive all the data, the call below
    # will succeed since no interval between a received data package and the
    # next receive attempt is greater than 30 seconds.
    fake_socket.AddResponse('{"method": "Test.foo"}', 10)
    fake_socket.AddResponse('{"method": "Test.bar"}', 20)
    fake_socket.AddResponse('{"method": "Test.baz"}', 35)
    fake_socket.AddResponse('{"method": "Test.qux"}', 50)
    fake_socket.AddResponse('{"method": "Test.baz"}', 60)
    fake_socket.AddResponse('{"method": "Test.foo"}', 70)
    inspector.DispatchNotificationsUntilDone(31)
    self.assertEqual(3, len(results))
    self.assertEqual('Test.baz', results[2]['method'])

  def testDispatchNotificationsUntilDoneTimedOut(self):
    inspector, fake_socket = self._MakeInspectorWithFakeSocket()
    results = []
    def OnTestEvent(result):
      results.append(result)

    inspector.RegisterDomain('Test', OnTestEvent, _DoNothingHandler)
    # The gap between the first response (at 5) and the second (at 16)
    # exceeds the 10-second timeout, so only one event is dispatched.
    fake_socket.AddResponse('{"method": "Test.foo"}', 5)
    fake_socket.AddResponse('{"method": "Test.bar"}', 16)
    fake_socket.AddResponse('{"method": "Test.baz"}', 20)
    with self.assertRaises(
        inspector_websocket.DispatchNotificationsUntilDoneTimeoutException):
      inspector.DispatchNotificationsUntilDone(10)
    self.assertEqual(1, len(results))

  def testUnregisterDomain(self):
    inspector, fake_socket = self._MakeInspectorWithFakeSocket()
    results = []
    def OnTestEvent(result):
      results.append(result)

    inspector.RegisterDomain('Test', OnTestEvent, _DoNothingHandler)
    inspector.RegisterDomain('Test2', OnTestEvent, _DoNothingHandler)
    inspector.UnregisterDomain('Test')
    fake_socket.AddResponse('{"method": "Test.foo"}', 5)
    fake_socket.AddResponse('{"method": "Test2.foo"}', 10)
    # The first notification targets the unregistered 'Test' domain and
    # must be dropped; the second targets 'Test2' and must be delivered.
    inspector.DispatchNotifications()
    self.assertEqual(0, len(results))
    inspector.DispatchNotifications()
    self.assertEqual(1, len(results))
    self.assertEqual('Test2.foo', results[0]['method'])

  def testUnregisterDomainWithUnregisteredDomain(self):
    inspector = inspector_websocket.InspectorWebsocket(
        error_handler=_ReraiseExceptionErrorHandler)
    with self.assertRaises(AssertionError):
      inspector.UnregisterDomain('Test')

  def testRegisterDomainWillCloseHandler(self):
    inspector = inspector_websocket.InspectorWebsocket(
        error_handler=_ReraiseExceptionErrorHandler)
    results = []
    def OnClose():
      results.append(1)

    results2 = []
    def OnClose2():
      results2.append(1)

    # Close handlers registered for 'Test' and 'Test2' must each fire
    # exactly once on Disconnect(); 'Test3' registers none.
    inspector.RegisterDomain('Test', _DoNothingHandler, OnClose)
    inspector.RegisterDomain('Test2', _DoNothingHandler, OnClose2)
    inspector.RegisterDomain('Test3', _DoNothingHandler)
    inspector.Disconnect()
    self.assertEqual(1, len(results))
    self.assertEqual(1, len(results2))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.