| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = null) |
|---|---|---|---|---|
CTSRD-SOAAP/chromium-42.0.2311.135
|
refs/heads/master
|
tools/perf/benchmarks/inbox_benchmark.py
|
3
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from page_sets import inbox
from telemetry import benchmark
from telemetry.web_perf import timeline_based_measurement
@benchmark.Disabled('android', 'reference', 'xp') # http://crbug.com/452257
class Inbox(benchmark.Benchmark):
  """Runs the timeline based measurement against inbox pageset."""
  test = timeline_based_measurement.TimelineBasedMeasurement

  def CreatePageSet(self, options):
    return inbox.InboxPageSet()
|
bcornwellmott/frappe
|
refs/heads/develop
|
frappe/patches/v7_2/update_communications.py
|
16
|
import frappe
def execute():
	"""
	in communication move feedback details to content
	remove Guest None from sender full name
	setup feedback request trigger's is_manual field
	"""
	frappe.reload_doc('core', 'doctype', 'dynamic_link')
	frappe.reload_doc('email', 'doctype', 'contact')
	frappe.reload_doc("core", "doctype", "feedback_request")
	frappe.reload_doc("core", "doctype", "communication")

	if frappe.db.has_column('Communication', 'feedback'):
		frappe.db.sql("""update tabCommunication set content=ifnull(feedback, "feedback details not provided")
			where communication_type="Feedback" and content is NULL""")

	frappe.db.sql(""" update tabCommunication set sender_full_name="" where communication_type="Feedback"
		and sender_full_name='Guest None' """)

	frappe.db.sql(""" update `tabFeedback Request` set is_manual=1, feedback_trigger="Manual"
		where ifnull(feedback_trigger, '')='' """)
|
smspillaz/pychecker
|
refs/heads/master
|
test_input/test55.py
|
11
|
'd'

def x():
    try :
        print "howdy, this ain't right"
    except KeyError, RuntimeError :
        pass

def y():
    try :
        print "ok, " + "this func %s should be fine" % y.__name__
    except (KeyError, RuntimeError) :
        pass

def z():
    try :
        pass
    except (KeyError, RuntimeError, IndexError) :
        pass

def a():
    try :
        pass
    except (KeyError, RuntimeError, IndexError), a :
        print a

try :
    pass
except KeyError, RuntimeError :
    pass

try :
    pass
except (KeyError, RuntimeError) :
    pass

def b():
    try :
        print "ok, " + "this func %s should be fine" % y.__name__
    except (KeyError, RuntimeError), msg :
        print msg

def c():
    try :
        print "ok, " + "this func %s should be fine" % y.__name__
    except KeyError, detail :
        print detail
|
konstruktoid/ansible-upstream
|
refs/heads/devel
|
lib/ansible/modules/net_tools/ldap/__init__.py
|
12133432
| |
diegojromerolopez/djanban
|
refs/heads/master
|
src/djanban/apps/niko_niko_calendar/__init__.py
|
12133432
| |
iogf/candocabot
|
refs/heads/master
|
plugins/router/__init__.py
|
12133432
| |
houzhenggang/hiwifi-openwrt-HC5661-HC5761
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/encodings/mac_iceland.py
|
593
|
""" Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='mac-iceland',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0xDD -> LATIN SMALL LETTER ETH
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xfe' # 0xDF -> LATIN SMALL LETTER THORN
u'\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
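# Editor's note (illustrative round-trip, not part of the generated file):
# once this module is registered as the 'mac-iceland' codec, byte 0xDF maps
# to U+00FE per the decoding table above, so on Python 2:
#   '\xdf'.decode('mac-iceland')   -> u'\xfe' (LATIN SMALL LETTER THORN)
#   u'\xfe'.encode('mac-iceland')  -> '\xdf'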
|
aberke/check-list
|
refs/heads/master
|
app/models/receipt.py
|
2
|
#********************************************************************************
#--------------------------------------------------------------------------------
#
# Significance Labs
# Brooklyn, NYC
#
# Author: Alexandra Berke (aberke)
# Written: Summer 2014
#
#
# Receipt Model
# a receipt is a static snapshot of a list at a given time
# it has a date (when created/sent to client)
# instead of referring to specific room and task models, it stores all data as embedded documents
# since this data cannot change
#
# Lists may be deleted, but receipts live forever
# When a list is deleted, instead of deleting receipt,
# receipt updated such that _list=null
#
#
# _id {ObjectId}
# _cleaner {ObjectId} -- cleaner._id of owner cleaner
# _list {ObjectId} -- list._id of owner list -- set to null when list deleted
# date {Date} -- set at time of creation
# phonenumber {String}
# location {String}
# notes {String}
# price {Integer} -- Integer not enforced server side
# rooms [{
#     name  {String}
#     type  {String} <- icon etc
#     count {Number} <- front end defaults to 1 if not set
#     tasks [{
#         name {String}
#     },{
#         ...
#     }]
# },{
#     ...
# }]
#
#--------------------------------------------------------------------------------
#*********************************************************************************
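# Editor's sketch of an example receipt document matching the schema above
# (hypothetical values, not taken from the application):
# {
#     "_id": ObjectId("..."), "_cleaner": ObjectId("..."), "_list": ObjectId("..."),
#     "date": "2014-07-01 12:00:00", "phonenumber": "5551234567",
#     "location": "Brooklyn, NY", "notes": "", "price": 80,
#     "rooms": [{"name": "Kitchen", "type": "kitchen", "count": 1,
#                "tasks": [{"name": "Mop floors"}]}]
# }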
import copy
from datetime import datetime
from app.database import db
from .model_utility import sanitize_id
import cleaner
def find(id=None):
    query = {}
    if id:
        query['_id'] = sanitize_id(id)
    receipts = [r for r in db.receipts.find(query)]
    for r in receipts:
        r['cleaner'] = cleaner.find_public(id=r['_cleaner'])
    return receipts

def find_one(**kwargs):
    r = find(**kwargs)
    return r[0] if r else None

def mark_list_deleted(id):
    """
    NOT USED
    When a list is deleted, instead of deleting receipt, it notifies receipt
    Receipt then updates _list to be null
    """
    return db.receipts.update({ "_id": sanitize_id(id) }, { "$set": {"_list": None }})

def create(list):
    """
    This method should only be called from list.create_receipt
    @param {dict} list model object fully populated with rooms that are fully populated with tasks
    Returns _id of newly inserted receipt
    Takes snapshot of list and creates receipt
    """
    receipt_data = copy.deepcopy(list)
    del receipt_data['_id']
    del receipt_data['last_modified']
    receipt_data['_list'] = sanitize_id(list['_id'])
    # add date
    receipt_data['date'] = str(datetime.utcnow())
    # insert into database
    receipt_id = db.receipts.insert(receipt_data)
    return receipt_id
|
gtara/or-tools
|
refs/heads/master
|
examples/python/broken_weights.py
|
32
|
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Broken weights problem in Google CP Solver.
From http://www.mathlesstraveled.com/?p=701
'''
Here's a fantastic problem I recently heard. Apparently it was first
posed by Claude Gaspard Bachet de Meziriac in a book of arithmetic problems
published in 1612, and can also be found in Heinrich Dorrie's 100
Great Problems of Elementary Mathematics.
A merchant had a forty pound measuring weight that broke
into four pieces as the result of a fall. When the pieces were
subsequently weighed, it was found that the weight of each piece
was a whole number of pounds and that the four pieces could be
used to weigh every integral weight between 1 and 40 pounds. What
were the weights of the pieces?
Note that since this was a 17th-century merchant, he of course used a
balance scale to weigh things. So, for example, he could use a 1-pound
weight and a 4-pound weight to weigh a 3-pound object, by placing the
3-pound object and 1-pound weight on one side of the scale, and
the 4-pound weight on the other side.
'''
Compare with the following problems:
* MiniZinc: http://www.hakank.org/minizinc/broken_weights.mzn
* ECLiPSE: http://www.hakank.org/eclipse/broken_weights.ecl
* Gecode: http://www.hakank.org/gecode/broken_weights.cpp
* Comet: http://hakank.org/comet/broken_weights.co
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
import string
from ortools.constraint_solver import pywrapcp
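# Editor's note: the classic answer is 1, 3, 9, 27 (the balanced-ternary
# weights). A quick brute-force sanity check, independent of the CP model
# below, that these four pieces weigh every integral weight from 1 to 40:
from itertools import product
assert set(range(1, 41)) <= {sum(s * w for s, w in zip(signs, (1, 3, 9, 27)))
                             for signs in product((-1, 0, 1), repeat=4)}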
def main(m=40, n=4):

  # Create the solver.
  solver = pywrapcp.Solver('Broken weights')

  #
  # data
  #
  print 'total weight (m):', m
  print 'number of pieces (n):', n
  print

  #
  # variables
  #
  weights = [solver.IntVar(1, m, 'weights[%i]' % j) for j in range(n)]
  x = {}
  for i in range(m):
    for j in range(n):
      x[i, j] = solver.IntVar(-1, 1, 'x[%i,%i]' % (i, j))
  x_flat = [x[i, j] for i in range(m) for j in range(n)]

  #
  # constraints
  #

  # symmetry breaking
  for j in range(1, n):
    solver.Add(weights[j - 1] < weights[j])

  solver.Add(solver.SumEquality(weights, m))

  # Check that all weights from 1 to 40 can be made.
  #
  # Since each piece can be on either side of the scale, we allow
  # -1, 0, or 1 for each weight, where -1 means the piece is on the
  # left pan and 1 means it is on the right.
  #
  for i in range(m):
    solver.Add(i + 1 == solver.Sum([weights[j] * x[i, j]
                                    for j in range(n)]))

  # objective
  objective = solver.Minimize(weights[n - 1], 1)

  #
  # search and result
  #
  db = solver.Phase(weights + x_flat,
                    solver.CHOOSE_FIRST_UNBOUND,
                    solver.ASSIGN_MIN_VALUE)

  search_log = solver.SearchLog(1)
  solver.NewSearch(db, [objective])

  num_solutions = 0
  while solver.NextSolution():
    num_solutions += 1
    print 'weights: ',
    for w in [weights[j].Value() for j in range(n)]:
      print '%3i ' % w,
    print
    print '-' * 30
    for i in range(m):
      print 'weight %2i:' % (i + 1),
      for j in range(n):
        print '%3i ' % x[i, j].Value(),
      print
    print
    print

  solver.EndSearch()
  print 'num_solutions:', num_solutions
  print 'failures :', solver.Failures()
  print 'branches :', solver.Branches()
  print 'WallTime:', solver.WallTime(), 'ms'


m = 40
n = 4
if __name__ == '__main__':
  if len(sys.argv) > 1:
    m = string.atoi(sys.argv[1])
  if len(sys.argv) > 2:
    n = string.atoi(sys.argv[2])

  main(m, n)
|
absoludity/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/wptserve/wptserve/request.py
|
87
|
import base64
import cgi
import Cookie
import StringIO
import tempfile
import urlparse
from . import stash
from .utils import HTTPException
missing = object()
class Server(object):
    """Data about the server environment

    .. attribute:: config

        Environment configuration information with information about the
        various servers running, their hostnames and ports.

    .. attribute:: stash

        Stash object holding state stored on the server between requests.

    """
    config = None

    def __init__(self, request):
        self._stash = None
        self._request = request

    @property
    def stash(self):
        if self._stash is None:
            address, authkey = stash.load_env_config()
            self._stash = stash.Stash(self._request.url_parts.path, address, authkey)
        return self._stash
class InputFile(object):
    max_buffer_size = 1024*1024

    def __init__(self, rfile, length):
        """File-like object used to provide a seekable view of request body data"""
        self._file = rfile
        self.length = length

        self._file_position = 0

        if length > self.max_buffer_size:
            self._buf = tempfile.TemporaryFile(mode="rw+b")
        else:
            self._buf = StringIO.StringIO()

    @property
    def _buf_position(self):
        rv = self._buf.tell()
        assert rv <= self._file_position
        return rv

    def read(self, bytes=-1):
        assert self._buf_position <= self._file_position

        if bytes < 0:
            bytes = self.length - self._buf_position
        bytes_remaining = min(bytes, self.length - self._buf_position)

        if bytes_remaining == 0:
            return ""

        if self._buf_position != self._file_position:
            buf_bytes = min(bytes_remaining, self._file_position - self._buf_position)
            old_data = self._buf.read(buf_bytes)
            bytes_remaining -= buf_bytes
        else:
            old_data = ""

        assert self._buf_position == self._file_position, (
            "Before reading buffer position (%i) didn't match file position (%i)" %
            (self._buf_position, self._file_position))
        new_data = self._file.read(bytes_remaining)
        self._buf.write(new_data)
        self._file_position += bytes_remaining
        assert self._buf_position == self._file_position, (
            "After reading buffer position (%i) didn't match file position (%i)" %
            (self._buf_position, self._file_position))

        return old_data + new_data

    def tell(self):
        return self._buf_position

    def seek(self, offset):
        if offset > self.length or offset < 0:
            raise ValueError
        if offset <= self._file_position:
            self._buf.seek(offset)
        else:
            self.read(offset - self._file_position)

    def readline(self, max_bytes=None):
        if max_bytes is None:
            max_bytes = self.length - self._buf_position

        if self._buf_position < self._file_position:
            data = self._buf.readline(max_bytes)
            if data.endswith("\n") or len(data) == max_bytes:
                return data
        else:
            data = ""

        assert self._buf_position == self._file_position

        initial_position = self._file_position
        found = False
        buf = []
        max_bytes -= len(data)
        while not found:
            readahead = self.read(min(2, max_bytes))
            max_bytes -= len(readahead)
            for i, c in enumerate(readahead):
                if c == "\n":
                    buf.append(readahead[:i+1])
                    found = True
                    break
            if not found:
                buf.append(readahead)
            if not readahead or not max_bytes:
                break
        new_data = "".join(buf)
        data += new_data
        self.seek(initial_position + len(new_data))
        return data

    def readlines(self):
        rv = []
        while True:
            data = self.readline()
            if data:
                rv.append(data)
            else:
                break
        return rv

    def next(self):
        data = self.readline()
        if data:
            return data
        else:
            raise StopIteration

    def __iter__(self):
        return self
class Request(object):
    """Object representing an HTTP request.

    .. attribute:: doc_root

        The local directory to use as a base when resolving paths

    .. attribute:: route_match

        Regexp match object from matching the request path to the route
        selected for the request.

    .. attribute:: protocol_version

        HTTP version specified in the request.

    .. attribute:: method

        HTTP method in the request.

    .. attribute:: request_path

        Request path as it appears in the HTTP request.

    .. attribute:: url_base

        The prefix part of the path; typically / unless the handler has a url_base set

    .. attribute:: url

        Absolute URL for the request.

    .. attribute:: raw_input

        File-like object representing the body of the request.

    .. attribute:: url_parts

        Parts of the requested URL as obtained by urlparse.urlsplit(path)

    .. attribute:: request_line

        Raw request line

    .. attribute:: headers

        RequestHeaders object providing a dictionary-like representation of
        the request headers.

    .. attribute:: body

        Request body as a string

    .. attribute:: GET

        MultiDict representing the parameters supplied with the request.
        Note that these may be present on non-GET requests; the name is
        chosen to be familiar to users of other systems such as PHP.

    .. attribute:: POST

        MultiDict representing the request body parameters. Most parameters
        are present as string values, but file uploads have file-like
        values.

    .. attribute:: cookies

        Cookies object representing cookies sent with the request with a
        dictionary-like interface.

    .. attribute:: auth

        Object with username and password properties representing any
        credentials supplied using HTTP authentication.

    .. attribute:: server

        Server object containing information about the server environment.
    """

    def __init__(self, request_handler):
        self.doc_root = request_handler.server.router.doc_root
        self.route_match = None  # Set by the router

        self.protocol_version = request_handler.protocol_version
        self.method = request_handler.command

        scheme = request_handler.server.scheme
        host = request_handler.headers.get("Host")
        port = request_handler.server.server_address[1]

        if host is None:
            host = request_handler.server.server_address[0]
        else:
            if ":" in host:
                host, port = host.split(":", 1)

        self.request_path = request_handler.path
        self.url_base = "/"

        if self.request_path.startswith(scheme + "://"):
            self.url = request_handler.path
        else:
            self.url = "%s://%s:%s%s" % (scheme,
                                         host,
                                         port,
                                         self.request_path)
        self.url_parts = urlparse.urlsplit(self.url)

        self._raw_headers = request_handler.headers

        self.request_line = request_handler.raw_requestline

        self._headers = None

        self.raw_input = InputFile(request_handler.rfile,
                                   int(self.headers.get("Content-Length", 0)))
        self._body = None

        self._GET = None
        self._POST = None
        self._cookies = None
        self._auth = None

        self.server = Server(self)

    def __repr__(self):
        return "<Request %s %s>" % (self.method, self.url)

    @property
    def GET(self):
        if self._GET is None:
            params = urlparse.parse_qsl(self.url_parts.query, keep_blank_values=True)
            self._GET = MultiDict()
            for key, value in params:
                self._GET.add(key, value)
        return self._GET

    @property
    def POST(self):
        if self._POST is None:
            # Work out the post parameters
            pos = self.raw_input.tell()
            self.raw_input.seek(0)
            fs = cgi.FieldStorage(fp=self.raw_input,
                                  environ={"REQUEST_METHOD": self.method},
                                  headers=self.headers,
                                  keep_blank_values=True)
            self._POST = MultiDict.from_field_storage(fs)
            self.raw_input.seek(pos)
        return self._POST

    @property
    def cookies(self):
        if self._cookies is None:
            parser = Cookie.BaseCookie()
            cookie_headers = self.headers.get("cookie", "")
            parser.load(cookie_headers)
            cookies = Cookies()
            for key, value in parser.iteritems():
                cookies[key] = CookieValue(value)
            self._cookies = cookies
        return self._cookies

    @property
    def headers(self):
        if self._headers is None:
            self._headers = RequestHeaders(self._raw_headers)
        return self._headers

    @property
    def body(self):
        if self._body is None:
            pos = self.raw_input.tell()
            self.raw_input.seek(0)
            self._body = self.raw_input.read()
            self.raw_input.seek(pos)
        return self._body

    @property
    def auth(self):
        if self._auth is None:
            self._auth = Authentication(self.headers)
        return self._auth
class RequestHeaders(dict):
    """Dictionary-like API for accessing request headers."""
    def __init__(self, items):
        for key, value in zip(items.keys(), items.values()):
            key = key.lower()
            if key in self:
                self[key].append(value)
            else:
                dict.__setitem__(self, key, [value])

    def __getitem__(self, key):
        """Get all headers of a certain (case-insensitive) name. If there is
        more than one, the values are returned comma separated"""
        values = dict.__getitem__(self, key.lower())
        if len(values) == 1:
            return values[0]
        else:
            return ", ".join(values)

    def __setitem__(self, name, value):
        raise Exception

    def get(self, key, default=None):
        """Get a string representing all headers with a particular value,
        with multiple headers separated by a comma. If no header is found
        return a default value

        :param key: The header name to look up (case-insensitive)
        :param default: The value to return in the case of no match
        """
        try:
            return self[key]
        except KeyError:
            return default

    def get_list(self, key, default=missing):
        """Get all the header values for a particular field name as
        a list"""
        try:
            return dict.__getitem__(self, key.lower())
        except KeyError:
            if default is not missing:
                return default
            else:
                raise

    def __contains__(self, key):
        return dict.__contains__(self, key.lower())

    def iteritems(self):
        for item in self:
            yield item, self[item]

    def itervalues(self):
        for item in self:
            yield self[item]
class CookieValue(object):
    """Representation of cookies.

    Note that cookies are considered read-only and the string value
    of the cookie will not change if you update the field values.
    However this is not enforced.

    .. attribute:: key

        The name of the cookie.

    .. attribute:: value

        The value of the cookie

    .. attribute:: expires

        The expiry date of the cookie

    .. attribute:: path

        The path of the cookie

    .. attribute:: comment

        The comment of the cookie.

    .. attribute:: domain

        The domain with which the cookie is associated

    .. attribute:: max_age

        The max-age value of the cookie.

    .. attribute:: secure

        Whether the cookie is marked as secure

    .. attribute:: httponly

        Whether the cookie is marked as httponly
    """
    def __init__(self, morsel):
        self.key = morsel.key
        self.value = morsel.value

        for attr in ["expires", "path",
                     "comment", "domain", "max-age",
                     "secure", "version", "httponly"]:
            setattr(self, attr.replace("-", "_"), morsel[attr])

        self._str = morsel.OutputString()

    def __str__(self):
        return self._str

    def __repr__(self):
        return self._str

    def __eq__(self, other):
        """Equality comparison for cookies. Compares to other cookies
        based on value alone and on non-cookies based on the equality
        of self.value with the other object so that a cookie with value
        "ham" compares equal to the string "ham"
        """
        if hasattr(other, "value"):
            return self.value == other.value
        return self.value == other
class MultiDict(dict):
    """Dictionary type that holds multiple values for each
    key"""
    #TODO: this should perhaps also order the keys
    def __init__(self):
        pass

    def __setitem__(self, name, value):
        dict.__setitem__(self, name, [value])

    def add(self, name, value):
        if name in self:
            dict.__getitem__(self, name).append(value)
        else:
            dict.__setitem__(self, name, [value])

    def __getitem__(self, key):
        """Get the first value with a given key"""
        #TODO: should this instead be the last value?
        return self.first(key)

    def first(self, key, default=missing):
        """Get the first value with a given key

        :param key: The key to lookup
        :param default: The default to return if key is
                        not found (throws if nothing is
                        specified)
        """
        if key in self and dict.__getitem__(self, key):
            return dict.__getitem__(self, key)[0]
        elif default is not missing:
            return default
        raise KeyError

    def last(self, key, default=missing):
        """Get the last value with a given key

        :param key: The key to lookup
        :param default: The default to return if key is
                        not found (throws if nothing is
                        specified)
        """
        if key in self and dict.__getitem__(self, key):
            return dict.__getitem__(self, key)[-1]
        elif default is not missing:
            return default
        raise KeyError

    def get_list(self, key):
        """Get all values with a given key as a list

        :param key: The key to lookup
        """
        return dict.__getitem__(self, key)

    @classmethod
    def from_field_storage(cls, fs):
        self = cls()
        if fs.list is None:
            return self
        for key in fs:
            values = fs[key]
            if not isinstance(values, list):
                values = [values]

            for value in values:
                if value.filename:
                    value = value
                else:
                    value = value.value
                self.add(key, value)
        return self
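# Editor's note: a minimal sketch (illustrative only, not part of the module)
# of how MultiDict behaves for repeated keys, e.g. repeated query parameters:
#
#   d = MultiDict()
#   d.add("a", "1")
#   d.add("a", "2")
#   d.first("a")     # -> "1"  (same as d["a"])
#   d.last("a")      # -> "2"
#   d.get_list("a")  # -> ["1", "2"]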
class Cookies(MultiDict):
    """MultiDict specialised for Cookie values"""
    def __init__(self):
        pass

    def __getitem__(self, key):
        return self.last(key)
class Authentication(object):
    """Object for dealing with HTTP Authentication

    .. attribute:: username

        The username supplied in the HTTP Authorization
        header, or None

    .. attribute:: password

        The password supplied in the HTTP Authorization
        header, or None
    """
    def __init__(self, headers):
        self.username = None
        self.password = None

        auth_schemes = {"Basic": self.decode_basic}

        if "authorization" in headers:
            header = headers.get("authorization")
            auth_type, data = header.split(" ", 1)
            if auth_type in auth_schemes:
                self.username, self.password = auth_schemes[auth_type](data)
            else:
                raise HTTPException(400, "Unsupported authentication scheme %s" % auth_type)

    def decode_basic(self, data):
        decoded_data = base64.decodestring(data)
        return decoded_data.split(":", 1)
|
DrChat/thermoctrl
|
refs/heads/master
|
templog/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
nishad-jobsglobal/odoo-marriot
|
refs/heads/master
|
addons/crm_mass_mailing/mass_mailing.py
|
55
|
from openerp.osv import osv
class MassMailing(osv.Model):
    _name = 'mail.mass_mailing'
    _inherit = ['mail.mass_mailing', 'crm.tracking.mixin']

    def on_change_model_and_list(self, cr, uid, ids, mailing_model, list_ids, context=None):
        res = super(MassMailing, self).on_change_model_and_list(cr, uid, ids, mailing_model, list_ids, context=context)
        if mailing_model == 'crm.lead':
            res = res or {}
            values = {'mailing_domain': "[('opt_out', '=', False)]"}
            res = dict(res, value=dict(res.get('value', {}), **values))
        return res
|
yongshengwang/builthue
|
refs/heads/master
|
desktop/core/ext-py/django-openid-auth-0.5/django_openid_auth/exceptions.py
|
45
|
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2008-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Exception classes thrown by OpenID Authentication and Validation."""
class DjangoOpenIDException(Exception):
    pass


class RequiredAttributeNotReturned(DjangoOpenIDException):
    pass


class IdentityAlreadyClaimed(DjangoOpenIDException):

    def __init__(self, message=None):
        if message is None:
            self.message = "Another user already exists for your selected OpenID"
        else:
            self.message = message


class DuplicateUsernameViolation(DjangoOpenIDException):

    def __init__(self, message=None):
        if message is None:
            self.message = "Your desired username is already being used."
        else:
            self.message = message


class MissingUsernameViolation(DjangoOpenIDException):

    def __init__(self, message=None):
        if message is None:
            self.message = "No nickname given for your account."
        else:
            self.message = message


class MissingPhysicalMultiFactor(DjangoOpenIDException):

    def __init__(self, message=None):
        if message is None:
            self.message = "Login requires physical multi-factor authentication."
        else:
            self.message = message
|
kawamon/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.11.29/django/contrib/staticfiles/testing.py
|
584
|
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.test import LiveServerTestCase
class StaticLiveServerTestCase(LiveServerTestCase):
    """
    Extends django.test.LiveServerTestCase to transparently overlay at test
    execution-time the assets provided by the staticfiles app finders. This
    means you don't need to run collectstatic before or as a part of your tests
    setup.
    """
    static_handler = StaticFilesHandler
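# Editor's note: illustrative usage (hypothetical test case, not part of this
# module). Subclass it exactly like LiveServerTestCase; the staticfiles
# finders serve assets without running collectstatic first:
#
#   class ExampleStaticTest(StaticLiveServerTestCase):
#       def test_stylesheet_is_served(self):
#           import urllib.request
#           url = self.live_server_url + '/static/css/base.css'
#           self.assertEqual(urllib.request.urlopen(url).status, 200)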
|
cyyber/QRL
|
refs/heads/master
|
tests/core/txs/test_MessageTransactionStateChanges.py
|
2
|
from unittest import TestCase
from mock import Mock
from qrl.core import config
from qrl.core.Indexer import Indexer
from qrl.core.misc import logger
from qrl.core.State import State
from qrl.core.StateContainer import StateContainer
from qrl.core.OptimizedAddressState import OptimizedAddressState
from qrl.core.ChainManager import ChainManager
from qrl.core.txs.MessageTransaction import MessageTransaction
from tests.misc.helper import get_alice_xmss, set_qrl_dir
logger.initialize_default()
class TestMessageTransactionStateChanges(TestCase):
    def setUp(self):
        with set_qrl_dir('no_data'):
            self.state = State()
        self.alice = get_alice_xmss()

        alice_address_state = OptimizedAddressState.get_default(self.alice.address)
        alice_address_state.pbdata.balance = 100
        self.addresses_state = {
            self.alice.address: alice_address_state
        }
        self.params = {
            "message_hash": b'Test Message',
            "addr_to": None,
            "fee": 1,
            "xmss_pk": self.alice.pk
        }
        self.unused_chain_manager_mock = Mock(autospec=ChainManager, name='unused ChainManager')

    def test_apply_message_txn(self):
        tx = MessageTransaction.create(**self.params)
        tx.sign(self.alice)
        addresses_state = dict(self.addresses_state)
        state_container = StateContainer(addresses_state=addresses_state,
                                         tokens=Indexer(b'token', None),
                                         slaves=Indexer(b'slave', None),
                                         lattice_pk=Indexer(b'lattice_pk', None),
                                         multi_sig_spend_txs=dict(),
                                         votes_stats=dict(),
                                         block_number=1,
                                         total_coin_supply=100,
                                         current_dev_config=config.dev,
                                         write_access=True,
                                         my_db=self.state._db,
                                         batch=None)
        tx.apply(self.state, state_container)

        self.assertEqual(addresses_state[self.alice.address].balance, 99)
        storage_key = state_container.paginated_tx_hash.generate_key(self.alice.address, 1)
        self.assertIn(storage_key, state_container.paginated_tx_hash.key_value)
        self.assertEqual([tx.txhash], state_container.paginated_tx_hash.key_value[storage_key])

    def test_revert_message_txn(self):
        tx = MessageTransaction.create(**self.params)
        tx.sign(self.alice)
        addresses_state = dict(self.addresses_state)
        state_container = StateContainer(addresses_state=addresses_state,
                                         tokens=Indexer(b'token', None),
                                         slaves=Indexer(b'slave', None),
                                         lattice_pk=Indexer(b'lattice_pk', None),
                                         multi_sig_spend_txs=dict(),
                                         votes_stats=dict(),
                                         block_number=1,
                                         total_coin_supply=100,
                                         current_dev_config=config.dev,
                                         write_access=True,
                                         my_db=self.state._db,
                                         batch=None)
        tx.apply(self.state, state_container)
        tx.revert(self.state, state_container)

        self.assertEqual(addresses_state[self.alice.address].balance, 100)
        storage_key = state_container.paginated_tx_hash.generate_key(self.alice.address, 1)
        self.assertIn(storage_key, state_container.paginated_tx_hash.key_value)
        self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])

    def test_validate_tx(self):
        tx = MessageTransaction.create(**self.params)
        tx.sign(self.alice)

        self.assertTrue(tx.validate_or_raise())

        tx._data.transaction_hash = b'abc'

        # Should fail, as we have modified with invalid transaction_hash
        with self.assertRaises(ValueError):
            tx.validate_or_raise()
|
IndonesiaX/edx-platform
|
refs/heads/master
|
common/djangoapps/util/disable_rate_limit.py
|
164
|
"""Utilities for disabling Django Rest Framework rate limiting.
This is useful for performance tests in which we need to generate
a lot of traffic from a particular IP address. By default,
Django Rest Framework uses the IP address to throttle traffic
for users who are not authenticated.
To disable rate limiting:
1) Decorate the Django Rest Framework APIView with `@can_disable_rate_limit`
2) In Django's admin interface, set `RateLimitConfiguration.enabled` to False.
Note: You should NEVER disable rate limiting in production.
"""
from functools import wraps
import logging
from rest_framework.views import APIView
from util.models import RateLimitConfiguration
LOGGER = logging.getLogger(__name__)
def _check_throttles_decorator(func):
    """Decorator for `APIView.check_throttles`.

    The decorated function will first check model-based config
    to see if rate limiting is disabled; if so, it skips
    the throttle check.  Otherwise, it calls the original
    function to enforce rate-limiting.

    Arguments:
        func (function): The function to decorate.

    Returns:
        The decorated function.
    """
    @wraps(func)
    def _decorated(*args, **kwargs):
        # Skip the throttle check entirely if we've disabled rate limiting.
        # Otherwise, perform the checks (as usual)
        if RateLimitConfiguration.current().enabled:
            return func(*args, **kwargs)
        else:
            msg = "Rate limiting is disabled because `RateLimitConfiguration` is not enabled."
            LOGGER.info(msg)
            return

    return _decorated


def can_disable_rate_limit(clz):
    """Class decorator that allows rate limiting to be disabled.

    Arguments:
        clz (class): The APIView subclass to decorate.

    Returns:
        class: the decorated class.

    Example Usage:
        >>> from rest_framework.views import APIView
        >>> @can_disable_rate_limit
        >>> class MyApiView(APIView):
        >>>     pass

    """
    # No-op if the class isn't a Django Rest Framework view.
    if not issubclass(clz, APIView):
        msg = (
            u"{clz} is not a Django Rest Framework APIView subclass."
        ).format(clz=clz)
        LOGGER.warning(msg)
        return clz

    # If we ARE explicitly disabling rate limiting,
    # modify the class to always allow requests.
    # Note that this overrides both rate limiting applied
    # for the particular view, as well as global rate limits
    # configured in Django settings.
    if hasattr(clz, 'check_throttles'):
        clz.check_throttles = _check_throttles_decorator(clz.check_throttles)

    return clz
|
olafhauk/mne-python
|
refs/heads/master
|
tutorials/machine-learning/plot_receptive_field.py
|
8
|
# -*- coding: utf-8 -*-
"""
=====================================================================
Spectro-temporal receptive field (STRF) estimation on continuous data
=====================================================================
This demonstrates how an encoding model can be fit with multiple continuous
inputs. In this case, we simulate the model behind a spectro-temporal receptive
field (or STRF). First, we create a linear filter that maps patterns in
spectro-temporal space onto an output, representing neural activity. We fit
a receptive field model that attempts to recover the original linear filter
that was used to create this data.
References
----------
Estimation of spectro-temporal and spatio-temporal receptive fields using
modeling with continuous inputs is described in:
.. [1] Theunissen, F. E. et al. Estimating spatio-temporal receptive
fields of auditory and visual neurons from their responses to
natural stimuli. Network 12, 289-316 (2001).
.. [2] Willmore, B. & Smyth, D. Methods for first-order kernel
estimation: simple-cell receptive fields from responses to
natural scenes. Network 14, 553-77 (2003).
.. [3] Crosse, M. J., Di Liberto, G. M., Bednar, A. & Lalor, E. C. (2016).
The Multivariate Temporal Response Function (mTRF) Toolbox:
A MATLAB Toolbox for Relating Neural Signals to Continuous Stimuli.
Frontiers in Human Neuroscience 10, 604.
doi:10.3389/fnhum.2016.00604
.. [4] Holdgraf, C. R. et al. Rapid tuning shifts in human auditory cortex
enhance speech intelligibility. Nature Communications, 7, 13654 (2016).
doi:10.1038/ncomms13654
"""
# Authors: Chris Holdgraf <choldgraf@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 7
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.decoding import ReceptiveField, TimeDelayingRidge
from scipy.stats import multivariate_normal
from scipy.io import loadmat
from sklearn.preprocessing import scale
rng = np.random.RandomState(1337) # To make this example reproducible
###############################################################################
# Load audio data
# ---------------
#
# We'll read in the audio data from [3]_ in order to simulate a response.
#
# In addition, we'll downsample the data along the time dimension in order to
# speed up computation. Note that depending on the input values, this may
# not be desired; for example, if your input stimulus varies more quickly
# than half the sampling rate to which we are downsampling, information
# will be lost.
# Read in audio that's been recorded in epochs.
path_audio = mne.datasets.mtrf.data_path()
data = loadmat(path_audio + '/speech_data.mat')
audio = data['spectrogram'].T
sfreq = float(data['Fs'][0, 0])
n_decim = 2
audio = mne.filter.resample(audio, down=n_decim, npad='auto')
sfreq /= n_decim
###############################################################################
# Create a receptive field
# ------------------------
#
# We'll simulate a linear receptive field for a theoretical neural signal. This
# defines how the signal will respond to power in this receptive field space.
n_freqs = 20
tmin, tmax = -0.1, 0.4
# To simulate the data we'll create explicit delays here
delays_samp = np.arange(np.round(tmin * sfreq),
np.round(tmax * sfreq) + 1).astype(int)
delays_sec = delays_samp / sfreq
freqs = np.linspace(50, 5000, n_freqs)
grid = np.array(np.meshgrid(delays_sec, freqs))
# We need data to be shaped as n_epochs, n_features, n_times, so swap axes here
grid = grid.swapaxes(0, -1).swapaxes(0, 1)
# Simulate a temporal receptive field with a Gabor filter
means_high = [.1, 500]
means_low = [.2, 2500]
cov = [[.001, 0], [0, 500000]]
gauss_high = multivariate_normal.pdf(grid, means_high, cov)
gauss_low = -1 * multivariate_normal.pdf(grid, means_low, cov)
weights = gauss_high + gauss_low # Combine to create the "true" STRF
kwargs = dict(vmax=np.abs(weights).max(), vmin=-np.abs(weights).max(),
cmap='RdBu_r', shading='gouraud')
fig, ax = plt.subplots()
ax.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax.set(title='Simulated STRF', xlabel='Time Lags (s)', ylabel='Frequency (Hz)')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
###############################################################################
# Simulate a neural response
# --------------------------
#
# Using this receptive field, we'll create an artificial neural response to
# a stimulus.
#
# To do this, we'll create a time-delayed version of the receptive field, and
# then calculate the dot product between this and the stimulus. Note that this
# is effectively doing a convolution between the stimulus and the receptive
# field. See `here <https://en.wikipedia.org/wiki/Convolution>`_ for more
# information.
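# Editor's toy illustration: delaying x = [1, 2, 3, 4] by +1 sample gives
# [0, 1, 2, 3]; stacking such shifted copies of the stimulus and taking the
# dot product with the filter weights is exactly a discrete convolution.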
# Reshape audio to split into epochs, then make epochs the first dimension.
n_epochs, n_seconds = 16, 5
audio = audio[:, :int(n_seconds * sfreq * n_epochs)]
X = audio.reshape([n_freqs, n_epochs, -1]).swapaxes(0, 1)
n_times = X.shape[-1]
# Delay the spectrogram according to delays so it can be combined w/ the STRF
# Lags will now be in axis 1, then we reshape to vectorize
delays = np.arange(np.round(tmin * sfreq),
np.round(tmax * sfreq) + 1).astype(int)
# Iterate through indices and append
X_del = np.zeros((len(delays),) + X.shape)
for ii, ix_delay in enumerate(delays):
    # These arrays will take/put particular indices in the data
    take = [slice(None)] * X.ndim
    put = [slice(None)] * X.ndim
    if ix_delay > 0:
        take[-1] = slice(None, -ix_delay)
        put[-1] = slice(ix_delay, None)
    elif ix_delay < 0:
        take[-1] = slice(-ix_delay, None)
        put[-1] = slice(None, ix_delay)
    X_del[ii][tuple(put)] = X[tuple(take)]
# Now set the delayed axis to the 2nd dimension
X_del = np.rollaxis(X_del, 0, 3)
X_del = X_del.reshape([n_epochs, -1, n_times])
n_features = X_del.shape[1]
weights_sim = weights.ravel()
# Simulate a neural response to the sound, given this STRF
y = np.zeros((n_epochs, n_times))
for ii, iep in enumerate(X_del):
    # Simulate this epoch and add random noise
    noise_amp = .002
    y[ii] = np.dot(weights_sim, iep) + noise_amp * rng.randn(n_times)
# Plot the first 2 trials of audio and the simulated electrode activity
X_plt = scale(np.hstack(X[:2]).T).T
y_plt = scale(np.hstack(y[:2]))
time = np.arange(X_plt.shape[-1]) / sfreq
_, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 6), sharex=True)
ax1.pcolormesh(time, freqs, X_plt, vmin=0, vmax=4, cmap='Reds',
shading='gouraud')
ax1.set_title('Input auditory features')
ax1.set(ylim=[freqs.min(), freqs.max()], ylabel='Frequency (Hz)')
ax2.plot(time, y_plt)
ax2.set(xlim=[time.min(), time.max()], title='Simulated response',
xlabel='Time (s)', ylabel='Activity (a.u.)')
mne.viz.tight_layout()
###############################################################################
# Fit a model to recover this receptive field
# -------------------------------------------
#
# Finally, we'll use the :class:`mne.decoding.ReceptiveField` class to recover
# the linear receptive field of this signal. Note that properties of the
# receptive field (e.g. smoothness) will depend on the autocorrelation in the
# inputs and outputs.
# Create training and testing data
train, test = np.arange(n_epochs - 1), n_epochs - 1
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
X_train, X_test, y_train, y_test = [np.rollaxis(ii, -1, 0) for ii in
(X_train, X_test, y_train, y_test)]
# Model the simulated data as a function of the spectrogram input
alphas = np.logspace(-3, 3, 7)
scores = np.zeros_like(alphas)
models = []
for ii, alpha in enumerate(alphas):
    rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=alpha)
    rf.fit(X_train, y_train)

    # Now make predictions about the model output, given input stimuli.
    scores[ii] = rf.score(X_test, y_test)
    models.append(rf)
times = rf.delays_ / float(rf.sfreq)
# Choose the model that performed best on the held out data
ix_best_alpha = np.argmax(scores)
best_mod = models[ix_best_alpha]
coefs = best_mod.coef_[0]
best_pred = best_mod.predict(X_test)[:, 0]
# Plot the original STRF, and the one that we recovered with modeling.
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, coefs, **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Reconstructed STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
# Plot the actual response and the predicted response on a held out stimulus
time_pred = np.arange(best_pred.shape[0]) / sfreq
fig, ax = plt.subplots()
ax.plot(time_pred, y_test, color='k', alpha=.2, lw=4)
ax.plot(time_pred, best_pred, color='r', lw=1)
ax.set(title='Original and predicted activity', xlabel='Time (s)')
ax.legend(['Original', 'Predicted'])
plt.autoscale(tight=True)
mne.viz.tight_layout()
###############################################################################
# Visualize the effects of regularization
# ---------------------------------------
#
# Above we fit a :class:`mne.decoding.ReceptiveField` model for one of many
# values for the ridge regularization parameter. Here we will plot the model
# score as well as the model coefficients for each value, in order to
# visualize how coefficients change with different levels of regularization.
# These issues as well as the STRF pipeline are described in detail
# in [1]_, [2]_, and [4]_.
# Plot model score for each ridge parameter
fig = plt.figure(figsize=(10, 4))
ax = plt.subplot2grid([2, len(alphas)], [1, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores, marker='o', color='r')
ax.annotate('Best parameter', (ix_best_alpha, scores[ix_best_alpha]),
(ix_best_alpha, scores[ix_best_alpha] - .1),
arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Ridge regularization value", ylabel="Score ($R^2$)",
xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
for ii, (rf, i_alpha) in enumerate(zip(models, alphas)):
    ax = plt.subplot2grid([2, len(alphas)], [0, ii], 1, 1)
    ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
    plt.xticks([], [])
    plt.yticks([], [])
    plt.autoscale(tight=True)
fig.suptitle('Model coefficients / scores for many ridge parameters', y=1)
mne.viz.tight_layout()
###############################################################################
# Using different regularization types
# ------------------------------------
# In addition to the standard ridge regularization, the
# :class:`mne.decoding.TimeDelayingRidge` class also exposes
# `Laplacian <https://en.wikipedia.org/wiki/Laplacian_matrix>`_ regularization
# term as:
#
# .. math::
#
#    \left[\begin{matrix}
#         1 & -1 &        &        &        &    \\
#        -1 &  2 & -1     &        &        &    \\
#           & -1 &  2     & -1     &        &    \\
#           &    & \ddots & \ddots & \ddots &    \\
#           &    &        & -1     &  2     & -1 \\
#           &    &        &        & -1     &  1
#    \end{matrix}\right]
#
# This imposes a smoothness constraint of nearby time samples and/or features.
# Quoting [3]_:
#
#    Tikhonov [identity] regularization (Equation 5) reduces overfitting by
#    smoothing the TRF estimate in a way that is insensitive to
#    the amplitude of the signal of interest. However, the Laplacian
#    approach (Equation 6) reduces off-sample error whilst preserving
#    signal amplitude (Lalor et al., 2006). As a result, this approach
#    usually leads to an improved estimate of the system’s response (as
#    indexed by MSE) compared to Tikhonov regularization.
#
#
scores_lap = np.zeros_like(alphas)
models_lap = []
for ii, alpha in enumerate(alphas):
    estimator = TimeDelayingRidge(tmin, tmax, sfreq, reg_type='laplacian',
                                  alpha=alpha)
    rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=estimator)
    rf.fit(X_train, y_train)

    # Now make predictions about the model output, given input stimuli.
    scores_lap[ii] = rf.score(X_test, y_test)
    models_lap.append(rf)
ix_best_alpha_lap = np.argmax(scores_lap)
###############################################################################
# Compare model performance
# -------------------------
# Below we visualize the model performance of each regularization method
# (ridge vs. Laplacian) for different levels of alpha. As you can see, the
# Laplacian method performs better in general, because it imposes a smoothness
# constraint along the time and feature dimensions of the coefficients.
# This matches the "true" receptive field structure and results in a better
# model fit.
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot2grid([3, len(alphas)], [2, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores_lap, marker='o', color='r')
ax.plot(np.arange(len(alphas)), scores, marker='o', color='0.5', ls=':')
ax.annotate('Best Laplacian', (ix_best_alpha_lap,
scores_lap[ix_best_alpha_lap]),
(ix_best_alpha_lap, scores_lap[ix_best_alpha_lap] - .1),
arrowprops={'arrowstyle': '->'})
ax.annotate('Best Ridge', (ix_best_alpha, scores[ix_best_alpha]),
(ix_best_alpha, scores[ix_best_alpha] - .1),
arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Laplacian regularization value", ylabel="Score ($R^2$)",
xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
xlim = times[[0, -1]]
for ii, (rf_lap, rf, i_alpha) in enumerate(zip(models_lap, models, alphas)):
    ax = plt.subplot2grid([3, len(alphas)], [0, ii], 1, 1)
    ax.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
    ax.set(xticks=[], yticks=[], xlim=xlim)
    if ii == 0:
        ax.set(ylabel='Laplacian')
    ax = plt.subplot2grid([3, len(alphas)], [1, ii], 1, 1)
    ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
    ax.set(xticks=[], yticks=[], xlim=xlim)
    if ii == 0:
        ax.set(ylabel='Ridge')
fig.suptitle('Model coefficients / scores for laplacian regularization', y=1)
mne.viz.tight_layout()
###############################################################################
# Plot the original STRF, and the one that we recovered with modeling.
rf = models[ix_best_alpha]
rf_lap = models_lap[ix_best_alpha_lap]
_, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3),
sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax3.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Ridge STRF')
ax3.set_title('Best Laplacian STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2, ax3]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
|
cangencer/hazelcast-python-client
|
refs/heads/master
|
hazelcast/protocol/codec/atomic_reference_apply_codec.py
|
2
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.atomic_reference_message_type import *
REQUEST_TYPE = ATOMICREFERENCE_APPLY
RESPONSE_TYPE = 105
RETRYABLE = False
def calculate_size(name, function):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += calculate_size_data(function)
return data_size
def encode_request(name, function):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, function))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_data(function)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
    parameters = dict(response=None)
if not client_message.read_bool():
parameters['response'] = to_object(client_message.read_data())
return parameters
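# Hedged usage sketch (not part of the generated codec). Assuming a
# serialization service with to_data()/to_object() and a transport object
# with an invoke() call -- both names are illustrative assumptions -- the
# codec would be driven roughly like this:
#
# def apply_on_reference(invocation_service, serialization_service, name, function_obj):
#     function_data = serialization_service.to_data(function_obj)
#     request = encode_request(name, function_data)
#     response_message = invocation_service.invoke(request)  # hypothetical transport call
#     return decode_response(response_message, serialization_service.to_object)['response']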
|
jburger424/MediaQueueHCI
|
refs/heads/dev
|
m-q-env/lib/python3.4/site-packages/sqlalchemy/connectors/zxJDBC.py
|
55
|
# connectors/zxJDBC.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
from . import Connector
class ZxJDBCConnector(Connector):
driver = 'zxjdbc'
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_unicode_binds = True
supports_unicode_statements = sys.version > '2.5.0+'
description_encoding = None
default_paramstyle = 'qmark'
jdbc_db_name = None
jdbc_driver_name = None
@classmethod
def dbapi(cls):
from com.ziclix.python.sql import zxJDBC
return zxJDBC
def _driver_kwargs(self):
"""Return kw arg dict to be sent to connect()."""
return {}
def _create_jdbc_url(self, url):
"""Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`"""
return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host,
url.port is not None
and ':%s' % url.port or '',
url.database)
def create_connect_args(self, url):
opts = self._driver_kwargs()
opts.update(url.query)
return [
[self._create_jdbc_url(url),
url.username, url.password,
self.jdbc_driver_name],
opts]
def is_disconnect(self, e, connection, cursor):
if not isinstance(e, self.dbapi.ProgrammingError):
return False
e = str(e)
return 'connection is closed' in e or 'cursor is closed' in e
def _get_server_version_info(self, connection):
# use connection.connection.dbversion, and parse appropriately
# to get a tuple
raise NotImplementedError()
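# Illustrative sketch (an assumption, not SQLAlchemy code): a concrete
# dialect only needs to supply the JDBC names; the class and driver below
# are hypothetical examples.
#
# class MyDBZxJDBCConnector(ZxJDBCConnector):
#     jdbc_db_name = 'mydb'
#     jdbc_driver_name = 'com.example.Driver'
#
# For a URL like mydb://host:1234/test, _create_jdbc_url() then yields
# 'jdbc:mydb://host:1234/test'.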
|
tradel/AppDynamicsREST
|
refs/heads/master
|
examples/license_count_by_env.py
|
1
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Sample script to generate a count of licenses used by environment type (Prod, Devl, Qual, Cert, etc.)
"""
from __future__ import print_function
from datetime import datetime
import itertools
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient
__author__ = 'Todd Radel'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
__version__ = '0.4.5'
def incr(d, name, amt=1):
d[name] = d.get(name, 0) + amt
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
nodes = []
for app in c.get_applications():
for node in c.get_nodes(app.id):
if node.has_machine_agent or node.has_app_agent:
if node.has_app_agent:
                if 'PHP' in node.type:
                    node.group_type = 'PHP App Agent'
                # elif (not a second if), so a PHP node is not reclassified
                # as a Java agent by the else branch below
                elif 'IIS' in node.type:
                    node.group_type = '.NET App Agent'
                else:
                    node.group_type = 'Java App Agent'
else:
node.group_type = 'Machine Agent only'
node.app = app
nodes.append(node)
# Sort and group the nodes by machine_id.
group_func = lambda x: x.machine_id
nodes.sort(key=group_func)
host_counts = dict()
node_counts = dict()
lic_counts = dict()
for machine_id, nodes_on_machine_iter in itertools.groupby(nodes, key=group_func):
nodes_on_machine = list(nodes_on_machine_iter)
first_node = nodes_on_machine[0]
agent_type = first_node.group_type
app_name = first_node.app.name
env = 'Production'
if 'Devl' in app_name:
env = 'Development'
if 'Qual' in app_name:
env = 'Qual'
if 'Cert' in app_name:
env = 'Cert'
all_same = all(x.group_type == agent_type for x in nodes_on_machine)
# assert all_same, first_node
all_same = all(x.app.name == app_name for x in nodes_on_machine)
# assert all_same, first_node
license_count = 1
if 'Java' in agent_type:
license_count = len(nodes_on_machine)
incr(lic_counts, env, license_count)
incr(host_counts, env, 1)
incr(node_counts, env, len(nodes_on_machine))
# Print the results.
tot_nodes, tot_hosts, tot_licenses = (0, 0, 0)
header_fmt = '%-30s %-15s %-15s %s'
data_fmt = '%-30s %15d %15d %15d'
print()
print('License usage report for ', args.url)
print('Generated at: ', datetime.now())
print()
print(header_fmt % ('Environment', 'Node Count', 'Host Count', 'License Count'))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
for env in sorted(node_counts.keys()):
node_count = node_counts.get(env, 0)
host_count = host_counts.get(env, 0)
lic_count = lic_counts.get(env, 0)
tot_nodes += node_count
tot_hosts += host_count
tot_licenses += lic_count
print(data_fmt % (env, node_count, host_count, lic_count))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
print(data_fmt % ('TOTAL', tot_nodes, tot_hosts, tot_licenses))
|
tempbottle/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_importlib/source/test_path_hook.py
|
84
|
from .. import util
from . import util as source_util
machinery = util.import_importlib('importlib.machinery')
import unittest
class PathHookTest:
"""Test the path hook for source."""
def path_hook(self):
return self.machinery.FileFinder.path_hook((self.machinery.SourceFileLoader,
self.machinery.SOURCE_SUFFIXES))
def test_success(self):
with source_util.create_modules('dummy') as mapping:
self.assertTrue(hasattr(self.path_hook()(mapping['.root']),
'find_module'))
def test_empty_string(self):
# The empty string represents the cwd.
self.assertTrue(hasattr(self.path_hook()(''), 'find_module'))
Frozen_PathHookTest, Source_PathHookTest = util.test_both(PathHookTest, machinery=machinery)
if __name__ == '__main__':
unittest.main()
|
chrivers/pyjaco
|
refs/heads/master
|
tests/list/in.py
|
5
|
l = ['a','b','c']
def intest(item,list):
if item in list:
print str(item) + ' is in list'
else:
print str(item) + ' is not in list'
intest('a',l)
intest('b',l)
intest(99,l)
intest(0,l)
intest('z',l)
intest('c',l)
|
vjmac15/Lyilis
|
refs/heads/master
|
lib/youtube_dl/extractor/amp (VJ Washington's conflicted copy 2017-08-29).py
|
28
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
mimetype2ext,
determine_ext,
ExtractorError,
)
class AMPIE(InfoExtractor):
# parse Akamai Adaptive Media Player feed
def _extract_feed_info(self, url):
feed = self._download_json(
url, None, 'Downloading Akamai AMP feed',
'Unable to download Akamai AMP feed')
item = feed.get('channel', {}).get('item')
if not item:
raise ExtractorError('%s said: %s' % (self.IE_NAME, feed['error']))
video_id = item['guid']
def get_media_node(name, default=None):
media_name = 'media-%s' % name
media_group = item.get('media-group') or item
return media_group.get(media_name) or item.get(media_name) or item.get(name, default)
thumbnails = []
media_thumbnail = get_media_node('thumbnail')
if media_thumbnail:
if isinstance(media_thumbnail, dict):
media_thumbnail = [media_thumbnail]
for thumbnail_data in media_thumbnail:
thumbnail = thumbnail_data.get('@attributes', {})
thumbnail_url = thumbnail.get('url')
if not thumbnail_url:
continue
thumbnails.append({
'url': self._proto_relative_url(thumbnail_url, 'http:'),
'width': int_or_none(thumbnail.get('width')),
'height': int_or_none(thumbnail.get('height')),
})
subtitles = {}
media_subtitle = get_media_node('subTitle')
if media_subtitle:
if isinstance(media_subtitle, dict):
media_subtitle = [media_subtitle]
for subtitle_data in media_subtitle:
subtitle = subtitle_data.get('@attributes', {})
subtitle_href = subtitle.get('href')
if not subtitle_href:
continue
subtitles.setdefault(subtitle.get('lang') or 'en', []).append({
'url': subtitle_href,
'ext': mimetype2ext(subtitle.get('type')) or determine_ext(subtitle_href),
})
formats = []
media_content = get_media_node('content')
if isinstance(media_content, dict):
media_content = [media_content]
for media_data in media_content:
media = media_data.get('@attributes', {})
media_url = media.get('url')
if not media_url:
continue
ext = mimetype2ext(media.get('type')) or determine_ext(media_url)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
media_url + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124',
video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
else:
formats.append({
'format_id': media_data.get('media-category', {}).get('@attributes', {}).get('label'),
'url': media['url'],
'tbr': int_or_none(media.get('bitrate')),
'filesize': int_or_none(media.get('fileSize')),
'ext': ext,
})
self._sort_formats(formats)
timestamp = parse_iso8601(item.get('pubDate'), ' ') or parse_iso8601(item.get('dc-date'))
return {
'id': video_id,
'title': get_media_node('title'),
'description': get_media_node('description'),
'thumbnails': thumbnails,
'timestamp': timestamp,
'duration': int_or_none(media_content[0].get('@attributes', {}).get('duration')),
'subtitles': subtitles,
'formats': formats,
}
|
Winand/pandas
|
refs/heads/master
|
pandas/core/indexes/__init__.py
|
12133432
| |
rexshihaoren/scikit-learn
|
refs/heads/master
|
sklearn/preprocessing/tests/__init__.py
|
12133432
| |
computersalat/ansible
|
refs/heads/devel
|
test/units/module_utils/facts/network/__init__.py
|
12133432
| |
ericdill/bokeh
|
refs/heads/master
|
bokeh/charts/builder/tests/__init__.py
|
12133432
| |
Digas29/bazel
|
refs/heads/master
|
tools/j2objc/j2objc_wrapper.py
|
3
|
#!/usr/bin/python2.7
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper script for J2ObjC transpiler.
This script wraps around J2ObjC transpiler to also output a dependency mapping
file by scanning the import and include directives of the J2ObjC-translated
files.
"""
import argparse
import multiprocessing
import os
import Queue
import re
import subprocess
import tempfile
import threading
INCLUDE_RE = re.compile('#(include|import) "([^"]+)"')
def RunJ2ObjC(java, jvm_flags, j2objc, main_class, j2objc_args):
"""Runs J2ObjC transpiler to translate Java source files to ObjC.
Args:
java: The path of the Java executable.
jvm_flags: A comma-separated list of flags to pass to JVM.
j2objc: The deploy jar of J2ObjC.
main_class: The J2ObjC main class to invoke.
j2objc_args: A list of args to pass to J2ObjC transpiler.
Returns:
None.
"""
source_files, flags = _ParseArgs(j2objc_args)
source_file_manifest_content = ' '.join(source_files)
fd = None
param_filename = None
try:
fd, param_filename = tempfile.mkstemp(text=True)
os.write(fd, source_file_manifest_content)
finally:
if fd:
os.close(fd)
try:
j2objc_cmd = [java]
j2objc_cmd.extend(filter(None, jvm_flags.split(',')))
j2objc_cmd.extend(['-cp', j2objc, main_class])
j2objc_cmd.extend(flags)
j2objc_cmd.extend(['@%s' % param_filename])
subprocess.check_call(j2objc_cmd, stderr=subprocess.STDOUT)
finally:
if param_filename:
os.remove(param_filename)
def WriteDepMappingFile(translated_source_files,
objc_file_path,
output_dependency_mapping_file,
file_open=open):
"""Scans J2ObjC-translated files and outputs a dependency mapping file.
The mapping file contains mappings between translated source files and their
imported source files scanned from the import and include directives.
Args:
translated_source_files: A comma-separated list of files translated by
J2ObjC.
objc_file_path: The file path which represents a directory where the
generated ObjC files reside.
output_dependency_mapping_file: The path of the dependency mapping file to
write to.
file_open: Reference to the builtin open function so it may be
overridden for testing.
Raises:
RuntimeError: If spawned threads throw errors during processing.
Returns:
None.
"""
dep_mapping = dict()
input_file_queue = Queue.Queue()
output_dep_mapping_queue = Queue.Queue()
error_message_queue = Queue.Queue()
for output_file in translated_source_files.split(','):
input_file_queue.put(output_file)
for _ in xrange(multiprocessing.cpu_count()):
t = threading.Thread(target=_ReadDepMapping, args=(input_file_queue,
output_dep_mapping_queue,
error_message_queue,
objc_file_path,
file_open))
t.start()
input_file_queue.join()
if not error_message_queue.empty():
error_messages = [error_message for error_message in
error_message_queue.queue]
raise RuntimeError('\n'.join(error_messages))
while not output_dep_mapping_queue.empty():
entry_file, deps = output_dep_mapping_queue.get()
dep_mapping[entry_file] = deps
f = file_open(output_dependency_mapping_file, 'w')
for entry in sorted(dep_mapping):
for dep in dep_mapping[entry]:
f.write(entry + ':' + dep + '\n')
f.close()
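# For illustration (hypothetical paths): if the translated file foo/Bar.m
# includes "foo/Baz.h", the mapping file receives a line of the form
# 'foo/Bar:foo/Baz'.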
def _ReadDepMapping(input_file_queue, output_dep_mapping_queue,
error_message_queue, objc_file_path, file_open=open):
while True:
try:
input_file = input_file_queue.get_nowait()
except Queue.Empty:
# No more work left in the queue.
return
try:
deps = []
entry = os.path.relpath(os.path.splitext(input_file)[0], objc_file_path)
with file_open(input_file, 'r') as f:
for line in f:
include = INCLUDE_RE.match(line)
if include:
include_path = include.group(2)
dep = os.path.splitext(include_path)[0]
if dep != entry:
deps.append(dep)
output_dep_mapping_queue.put((entry, deps))
except Exception as e: # pylint: disable=broad-except
error_message_queue.put(str(e))
finally:
# We need to mark the task done to prevent blocking the main process
# indefinitely.
input_file_queue.task_done()
def _ParseArgs(j2objc_args):
"""Separate arguments passed to J2ObjC into source files and J2ObjC flags.
Args:
j2objc_args: A list of args to pass to J2ObjC transpiler.
Returns:
A tuple containing source files and J2ObjC flags
"""
source_files = []
flags = []
is_next_flag_value = False
for j2objc_arg in j2objc_args:
if j2objc_arg.startswith('-'):
flags.append(j2objc_arg)
is_next_flag_value = True
elif is_next_flag_value:
flags.append(j2objc_arg)
is_next_flag_value = False
else:
source_files.append(j2objc_arg)
return (source_files, flags)
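# Illustrative example: _ParseArgs(['-d', 'out', 'Foo.java', 'Bar.java'])
# returns (['Foo.java', 'Bar.java'], ['-d', 'out']), because every argument
# directly following a flag is treated as that flag's value.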
if __name__ == '__main__':
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument(
'--java',
required=True,
help='The path to the Java executable.')
parser.add_argument(
'--jvm_flags',
default='',
help='A comma-separated list of flags to pass to the JVM.')
parser.add_argument(
'--j2objc',
required=True,
help='The path to the J2ObjC deploy jar.')
parser.add_argument(
'--main_class',
required=True,
help='The main class of the J2ObjC deploy jar to execute.')
parser.add_argument(
'--translated_source_files',
required=True,
help=('A comma-separated list of file paths where J2ObjC will write the '
'translated files to.'))
parser.add_argument(
'--output_dependency_mapping_file',
required=True,
help='The file path of the dependency mapping file to write to.')
parser.add_argument(
'--objc_file_path',
required=True,
help=('The file path which represents a directory where the generated '
'ObjC files reside.'))
args, pass_through_args = parser.parse_known_args()
RunJ2ObjC(args.java,
args.jvm_flags,
args.j2objc,
args.main_class,
pass_through_args)
WriteDepMappingFile(args.translated_source_files,
args.objc_file_path,
args.output_dependency_mapping_file)
|
jgdwyer/ML-convection
|
refs/heads/master
|
sknn_jgd/backend/lasagne/__init__.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals, print_function)
from ... import backend
from .mlp import MultiLayerPerceptronBackend
# Register this implementation as the MLP backend.
backend.MultiLayerPerceptronBackend = MultiLayerPerceptronBackend
backend.name = 'lasagne'
|
jymannob/CouchPotatoServer
|
refs/heads/develop
|
couchpotato/core/media/_base/providers/torrent/publichd.py
|
14
|
from urlparse import parse_qs
import re
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
import six
log = CPLog(__name__)
class Base(TorrentMagnetProvider):
urls = {
'test': 'https://publichd.se',
'detail': 'https://publichd.se/index.php?page=torrent-details&id=%s',
'search': 'https://publichd.se/index.php',
}
http_time_between_calls = 0
def search(self, movie, quality):
if not quality.get('hd', False):
return []
return super(Base, self).search(movie, quality)
def _search(self, media, quality, results):
query = self.buildUrl(media)
params = tryUrlencode({
'page': 'torrents',
'search': query,
'active': 1,
})
data = self.getHTMLData('%s?%s' % (self.urls['search'], params))
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'id': 'bgtorrlist2'})
entries = results_table.find_all('tr')
for result in entries[2:len(entries) - 1]:
info_url = result.find(href = re.compile('torrent-details'))
download = result.find(href = re.compile('magnet:'))
if info_url and download:
url = parse_qs(info_url['href'])
results.append({
'id': url['id'][0],
'name': six.text_type(info_url.string),
'url': download['href'],
'detail_url': self.urls['detail'] % url['id'][0],
'size': self.parseSize(result.find_all('td')[7].string),
'seeders': tryInt(result.find_all('td')[4].string),
'leechers': tryInt(result.find_all('td')[5].string),
'get_more_info': self.getMoreInfo
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getMoreInfo(self, item):
cache_key = 'publichd.%s' % item['id']
description = self.getCache(cache_key)
if not description:
try:
full_description = self.urlopen(item['detail_url'])
html = BeautifulSoup(full_description)
nfo_pre = html.find('div', attrs = {'id': 'torrmain'})
description = toUnicode(nfo_pre.text) if nfo_pre else ''
except:
log.error('Failed getting more info for %s', item['name'])
description = ''
self.setCache(cache_key, description, timeout = 25920000)
item['description'] = description
return item
config = [{
'name': 'publichd',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'PublicHD',
'description': 'Public Torrent site with only HD content. See <a href="https://publichd.se/">PublicHD</a>',
'wizard': True,
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': True,
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]
|
blueboxgroup/keystone
|
refs/heads/master
|
keystone/contrib/revoke/controllers.py
|
2
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils import timeutils
from keystone.common import controller
from keystone.common import dependency
from keystone import exception
from keystone.i18n import _
@dependency.requires('revoke_api')
class RevokeController(controller.V3Controller):
@controller.protected()
def list_revoke_events(self, context):
since = context['query_string'].get('since')
last_fetch = None
if since:
try:
last_fetch = timeutils.normalize_time(
timeutils.parse_isotime(since))
except ValueError:
raise exception.ValidationError(
message=_('invalid date format %s') % since)
events = self.revoke_api.get_events(last_fetch=last_fetch)
# Build the links by hand as the standard controller calls require ids
response = {'events': [event.to_dict() for event in events],
'links': {
'next': None,
'self': RevokeController.base_url(
context,
path=context['path']),
'previous': None}
}
return response
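# Illustrative request (the route is an assumption based on the standard
# OS-REVOKE extension): GET /v3/OS-REVOKE/events?since=2014-02-24T20:51:03Z
# returns {'events': [...], 'links': {'next': None, 'self': ..., 'previous': None}}
# as built above.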
|
htcondor/htcondor
|
refs/heads/master
|
bindings/python/classad/__init__.py
|
1
|
# Copyright 2019 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as _logging
# SET UP NULL LOG HANDLER
_logger = _logging.getLogger(__name__)
_logger.setLevel(_logging.DEBUG)
_logger.addHandler(_logging.NullHandler())
from .classad import *
__version__ = version()
|
cmoutard/mne-python
|
refs/heads/master
|
examples/visualization/plot_clickable_image.py
|
4
|
"""
================================================================
Demonstration of how to use ClickableImage / generate_2d_layout.
================================================================
In this example, we open an image file, then use ClickableImage to
return 2D locations of mouse clicks (or load a file already created).
Then, we use generate_2d_layout to turn those xy positions into a layout
for use with plotting topo maps. In this way, you can take arbitrary xy
positions and turn them into a plottable layout.
"""
# Authors: Christopher Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
from scipy.ndimage import imread
import numpy as np
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage, add_background_image # noqa
from mne.channels import generate_2d_layout # noqa
print(__doc__)
# Set parameters and paths
plt.rcParams['image.cmap'] = 'gray'
im_path = op.join(op.dirname(mne.__file__), 'data', 'image', 'mni_brain.gif')
# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
###############################################################################
# Load data and click
im = imread(im_path)
plt.imshow(im)
"""
This code opens the image so you can click on it. Commented out
because we've stored the clicks as a layout file already.
# The click coordinates are stored as a list of tuples
click = ClickableImage(im)
click.plot_clicks()
coords = click.coords
# Generate a layout from our clicks and normalize by the image
lt = generate_2d_layout(np.vstack(coords), bg_image=im)
lt.save(layout_path + layout_name) # To save if we want
"""
# We've already got the layout, load it
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
# Create some fake data
nchans = len(lt.pos)
nepochs = 50
sr = 1000
nsec = 5
events = np.arange(nepochs).reshape([-1, 1])
events = np.hstack([events, np.zeros([nepochs, 2], dtype=int)])
data = np.random.randn(nepochs, nchans, sr * nsec)
info = mne.create_info(nchans, sr, ch_types='eeg')
epochs = mne.EpochsArray(data, info, events)
evoked = epochs.average()
# Using the native plot_topo function with the image plotted in the background
f = evoked.plot_topo(layout=lt, fig_background=im)
|
40023255/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/browser/object_storage.py
|
627
|
import pickle
class __UnProvided():
pass
class ObjectStorage():
def __init__(self, storage):
self.storage = storage
def __delitem__(self, key):
del self.storage[pickle.dumps(key)]
def __getitem__(self, key):
return pickle.loads(self.storage[pickle.dumps(key)])
def __setitem__(self, key, value):
self.storage[pickle.dumps(key)] = pickle.dumps(value)
def __contains__(self, key):
return pickle.dumps(key) in self.storage
    def get(self, key, default=None):
        if pickle.dumps(key) in self.storage:
            # unpickle the stored value for consistency with __getitem__
            return pickle.loads(self.storage[pickle.dumps(key)])
        return default
def pop(self, key, default=__UnProvided()):
if type(default) is __UnProvided or pickle.dumps(key) in self.storage:
return pickle.loads(self.storage.pop(pickle.dumps(key)))
return default
def __iter__(self):
keys = self.keys()
return keys.__iter__()
def keys(self):
return [pickle.loads(key) for key in self.storage.keys()]
def values(self):
return [pickle.loads(val) for val in self.storage.values()]
def items(self):
return list(zip(self.keys(), self.values()))
def clear(self):
self.storage.clear()
def __len__(self):
return len(self.storage)
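# Hedged usage sketch (assumes Brython's browser.local_storage module is
# available in the running page):
#
# from browser.local_storage import storage
# store = ObjectStorage(storage)
# store['prefs'] = {'theme': 'dark'}   # keys and values are pickled transparently
# print(store.get('prefs'))           # -> {'theme': 'dark'}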
|
jameswatt2008/jameswatt2008.github.io
|
refs/heads/master
|
python/Python核心编程/网络编程/截图和代码/概述、SOCKET/多进程copy文件/test-复件/_pyio.py
|
3
|
"""
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
import array
import stat
import sys
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
if sys.platform in {'win32', 'cygwin'}:
from msvcrt import setmode as _setmode
else:
_setmode = None
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
valid_seek_flags = {0, 1, 2} # Hardwired values
if hasattr(os, 'SEEK_HOLE'):
valid_seek_flags.add(os.SEEK_HOLE)
valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't want
# to inherit the C implementations.
# Rebind for compatibility
BlockingIOError = BlockingIOError
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise OSError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (deprecated)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
    raises a `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
'U' mode is deprecated and will raise an exception in future versions
of Python. It has no effect in Python 3. Use newline to control
universal newlines mode.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
    closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
The newly created file is non-inheritable.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("axrwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
creating = "x" in modes
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if creating or writing or appending:
raise ValueError("can't use U and writing mode at once")
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if creating + reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (creating or reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(creating and "x" or "") +
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd, opener=opener)
result = raw
try:
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (OSError, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return result
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif creating or writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
result = buffer
if binary:
return result
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
result = text
text.mode = mode
return result
except:
result.close()
raise
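# Illustrative example (the file name is an assumption): with the defaults
# described above, a text-mode open() stacks FileIO -> BufferedReader or
# BufferedWriter -> TextIOWrapper, so:
#
# with open('example.txt', 'w', encoding='utf-8') as f:
#     f.write('hello\n')
# with open('example.txt', encoding='utf-8') as f:
#     print(type(f).__name__)  # TextIOWrapper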
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pylifecycle.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(ValueError, OSError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. Other bytes-like objects are accepted as method arguments too. In
some cases (such as readinto), a writable object is required. Text I/O
classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise OSError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
    with open('spam.txt', 'w') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an OSError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Some operating systems / file systems could provide additional values.
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise OSError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise OSError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise OSError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise a ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An OSError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, size=-1):
r"""Read and return a line of bytes from the stream.
If size is specified, at most size bytes will be read.
Size should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if size >= 0:
n = min(n, size)
return n
else:
def nreadahead():
return 1
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError("size must be an integer")
res = bytearray()
while size < 0 or len(res) < size:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, size=-1):
"""Read and return up to size bytes, where size is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if size is None:
size = -1
if size < 0:
return self.readall()
b = bytearray(size.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read bytes into a pre-allocated bytes-like object b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than the
length of b in bytes.
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, size=None):
"""Read and return up to size bytes, where size is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, size=None):
"""Read up to size bytes with at most one read() system call,
where size is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read bytes into a pre-allocated bytes-like object b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
return self._readinto(b, read1=False)
def readinto1(self, b):
"""Read bytes into buffer *b*, using at most one system call
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
return self._readinto(b, read1=True)
def _readinto(self, b, read1):
if not isinstance(b, memoryview):
b = memoryview(b)
b = b.cast('B')
if read1:
data = self.read1(len(b))
else:
data = self.read(len(b))
n = len(data)
b[:n] = data
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is always the length of b
in bytes.
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise OSError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise OSError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError("can not serialize a '{0}' object"
.format(self.__class__.__name__))
def __repr__(self):
modname = self.__class__.__module__
clsname = self.__class__.__qualname__
try:
name = self.name
except Exception:
return "<{}.{}>".format(modname, clsname)
else:
return "<{}.{} name={!r}>".format(modname, clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
if self.closed:
raise ValueError("getbuffer on closed file")
return memoryview(self._buffer)
def close(self):
self._buffer.clear()
super().close()
def read(self, size=None):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
if size < 0:
size = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + size)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, size):
"""This is the same as read.
"""
return self.read(size)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with memoryview(b) as view:
n = view.nbytes # Size of any bytes-like object
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
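# Illustrative example of the BytesIO semantics implemented above:
#
# b = BytesIO(b'abcdef')
# b.seek(2)     # -> 2
# b.read(3)     # -> b'cde'; position is now 5
# b.seek(1, 1)  # whence=1 seeks relative to the current position -> 6
# b.read()      # -> b'' (position is at the end of the buffer)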
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
A buffer for a readable, sequential BaseRawIO object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise OSError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def readable(self):
return self.raw.readable()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, size=None):
"""Read size bytes.
Returns exactly size bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If size is negative, read until EOF or until read() would
block.
"""
if size is not None and size < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(size)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
if hasattr(self.raw, 'readall'):
chunk = self.raw.readall()
if chunk is None:
return buf[pos:] or None
else:
return buf[pos:] + chunk
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
chunk = self.raw.read()
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
chunk = self.raw.read(wanted)
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
# n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, size=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(size)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
current = self.raw.read(to_read)
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, size):
"""Reads up to size bytes, with at most one read() system call."""
# Returns up to size bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if size < 0:
raise ValueError("number of bytes to read must be positive")
if size == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(size, len(self._read_buf) - self._read_pos))
# Implementing readinto() and readinto1() is not strictly necessary (we
# could rely on the base class that provides an implementation in terms of
# read() and read1()). We do it anyway to keep the _pyio implementation
# similar to the io implementation (which implements the methods for
# performance reasons).
def _readinto(self, buf, read1):
"""Read data into *buf* with at most one system call."""
# Need to create a memoryview object of type 'b', otherwise
# we may not be able to assign bytes to it, and slicing it
# would create a new object.
if not isinstance(buf, memoryview):
buf = memoryview(buf)
if buf.nbytes == 0:
return 0
buf = buf.cast('B')
written = 0
with self._read_lock:
while written < len(buf):
# First try to read from internal buffer
avail = min(len(self._read_buf) - self._read_pos, len(buf))
if avail:
buf[written:written+avail] = \
self._read_buf[self._read_pos:self._read_pos+avail]
self._read_pos += avail
written += avail
if written == len(buf):
break
# If remaining space in callers buffer is larger than
# internal buffer, read directly into callers buffer
if len(buf) - written > self.buffer_size:
n = self.raw.readinto(buf[written:])
if not n:
break # eof
written += n
# Otherwise refill internal buffer - unless we're
# in read1 mode and already got some data
elif not (read1 and written):
if not self._peek_unlocked(1):
break # eof
# In readinto1 mode, return as soon as we have some data
if read1 and written:
break
return written
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
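# ----------------------------------------------------------------------
# Editor's note: a minimal illustrative sketch (not part of the original
# module) of the read()/peek()/read1() semantics documented above, using
# the C-accelerated equivalents from the stdlib io module.
import io as _io_demo
_demo_buf = _io_demo.BufferedReader(_io_demo.BytesIO(b"abcdefgh"), buffer_size=4)
assert _demo_buf.peek(1)[:1] == b"a"  # at most one raw read; data stays buffered
assert _demo_buf.read(2) == b"ab"     # fast path: served from the buffer
assert _demo_buf.read1(10) == b"cd"   # returns only already-buffered bytes
# ----------------------------------------------------------------------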
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
if not raw.writable():
raise OSError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def writable(self):
return self.raw.writable()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise OSError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
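# Editor's illustrative sketch (not part of the original module): writes
# accumulate in the internal buffer until flush() drains it through
# raw.write(), mirroring the _flush_unlocked() loop above.
import io as _io_demo_w
_demo_raw = _io_demo_w.BytesIO()
_demo_w = _io_demo_w.BufferedWriter(_demo_raw, buffer_size=8)
_demo_w.write(b"abc")                  # buffered; raw stream still empty
assert _demo_raw.getvalue() == b""
_demo_w.flush()                        # drains the buffer via raw.write()
assert _demo_raw.getvalue() == b"abc"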
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
"""Constructor.
The arguments are two RawIO instances.
"""
if not reader.readable():
raise OSError('"reader" argument must be readable.')
if not writer.writable():
raise OSError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, size=None):
if size is None:
size = -1
return self.reader.read(size)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, size=0):
return self.reader.peek(size)
def read1(self, size):
return self.reader.read1(size)
def readinto1(self, b):
return self.reader.readinto1(b)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
try:
self.writer.close()
finally:
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise OSError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, size=None):
if size is None:
size = -1
self.flush()
return BufferedReader.read(self, size)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, size=0):
self.flush()
return BufferedReader.peek(self, size)
def read1(self, size):
self.flush()
return BufferedReader.read1(self, size)
def readinto1(self, b):
self.flush()
return BufferedReader.readinto1(self, b)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
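# Editor's illustrative sketch (not part of the original module):
# BufferedRandom combines both buffers, and tell() compensates for
# read-ahead exactly as the BufferedReader.tell() arithmetic above does.
import io as _io_demo_r
_demo_f = _io_demo_r.BytesIO(b"hello world")
_demo_rand = _io_demo_r.BufferedRandom(_demo_f)
assert _demo_rand.read(5) == b"hello"
assert _demo_rand.tell() == 5   # raw position minus unread buffered bytes
_demo_rand.seek(0)
_demo_rand.write(b"HELLO")      # write() first rewinds any read-ahead
_demo_rand.flush()
assert _demo_f.getvalue() == b"HELLO world"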
class FileIO(RawIOBase):
_fd = -1
_created = False
_readable = False
_writable = False
_appending = False
_seekable = None
_closefd = True
def __init__(self, file, mode='r', closefd=True, opener=None):
"""Open a file. The mode can be 'r' (default), 'w', 'x' or 'a' for reading,
writing, exclusive creation or appending. The file will be created if it
doesn't exist when opened for writing or appending; it will be truncated
when opened for writing. A FileExistsError will be raised if it already
exists when opened for creating. Opening a file for creating implies
writing so this mode behaves in a similar way to 'w'. Add a '+' to the mode
to allow simultaneous reading and writing. A custom opener can be used by
passing a callable as *opener*. The underlying file descriptor for the file
object is then obtained by calling opener with (*name*, *flags*).
*opener* must return an open file descriptor (passing os.open as *opener*
results in functionality similar to passing None).
"""
if self._fd >= 0:
# Have to close the existing file first.
try:
if self._closefd:
os.close(self._fd)
finally:
self._fd = -1
if isinstance(file, float):
raise TypeError('integer argument expected, got float')
if isinstance(file, int):
fd = file
if fd < 0:
raise ValueError('negative file descriptor')
else:
fd = -1
if not isinstance(mode, str):
raise TypeError('invalid mode: %s' % (mode,))
if not set(mode) <= set('xrwab+'):
raise ValueError('invalid mode: %s' % (mode,))
if sum(c in 'rwax' for c in mode) != 1 or mode.count('+') > 1:
raise ValueError('Must have exactly one of create/read/write/append '
'mode and at most one plus')
if 'x' in mode:
self._created = True
self._writable = True
flags = os.O_EXCL | os.O_CREAT
elif 'r' in mode:
self._readable = True
flags = 0
elif 'w' in mode:
self._writable = True
flags = os.O_CREAT | os.O_TRUNC
elif 'a' in mode:
self._writable = True
self._appending = True
flags = os.O_APPEND | os.O_CREAT
if '+' in mode:
self._readable = True
self._writable = True
if self._readable and self._writable:
flags |= os.O_RDWR
elif self._readable:
flags |= os.O_RDONLY
else:
flags |= os.O_WRONLY
flags |= getattr(os, 'O_BINARY', 0)
noinherit_flag = (getattr(os, 'O_NOINHERIT', 0) or
getattr(os, 'O_CLOEXEC', 0))
flags |= noinherit_flag
owned_fd = None
try:
if fd < 0:
if not closefd:
raise ValueError('Cannot use closefd=False with file name')
if opener is None:
fd = os.open(file, flags, 0o666)
else:
fd = opener(file, flags)
if not isinstance(fd, int):
raise TypeError('expected integer from opener')
if fd < 0:
raise OSError('Negative file descriptor')
owned_fd = fd
if not noinherit_flag:
os.set_inheritable(fd, False)
self._closefd = closefd
fdfstat = os.fstat(fd)
try:
if stat.S_ISDIR(fdfstat.st_mode):
raise IsADirectoryError(errno.EISDIR,
os.strerror(errno.EISDIR), file)
except AttributeError:
# Ignore the AttributeError if stat.S_ISDIR or errno.EISDIR
# don't exist.
pass
self._blksize = getattr(fdfstat, 'st_blksize', 0)
if self._blksize <= 1:
self._blksize = DEFAULT_BUFFER_SIZE
if _setmode:
# don't translate newlines (\r\n <=> \n)
_setmode(fd, os.O_BINARY)
self.name = file
if self._appending:
# For consistent behaviour, we explicitly seek to the
# end of file (otherwise, it might be done only on the
# first write()).
os.lseek(fd, 0, SEEK_END)
except:
if owned_fd is not None:
os.close(owned_fd)
raise
self._fd = fd
def __del__(self):
if self._fd >= 0 and self._closefd and not self.closed:
import warnings
warnings.warn('unclosed file %r' % (self,), ResourceWarning,
stacklevel=2)
self.close()
def __getstate__(self):
raise TypeError("cannot serialize '%s' object", self.__class__.__name__)
def __repr__(self):
class_name = '%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)
if self.closed:
return '<%s [closed]>' % class_name
try:
name = self.name
except AttributeError:
return ('<%s fd=%d mode=%r closefd=%r>' %
(class_name, self._fd, self.mode, self._closefd))
else:
return ('<%s name=%r mode=%r closefd=%r>' %
(class_name, name, self.mode, self._closefd))
def _checkReadable(self):
if not self._readable:
raise UnsupportedOperation('File not open for reading')
def _checkWritable(self, msg=None):
if not self._writable:
raise UnsupportedOperation('File not open for writing')
def read(self, size=None):
"""Read at most size bytes, returned as bytes.
Only makes one system call, so less data may be returned than requested
In non-blocking mode, returns None if no data is available.
Return an empty bytes object at EOF.
"""
self._checkClosed()
self._checkReadable()
if size is None or size < 0:
return self.readall()
try:
return os.read(self._fd, size)
except BlockingIOError:
return None
def readall(self):
"""Read all data from the file, returned as bytes.
In non-blocking mode, returns as much as is immediately available,
or None if no data is available. Return an empty bytes object at EOF.
"""
self._checkClosed()
self._checkReadable()
bufsize = DEFAULT_BUFFER_SIZE
try:
pos = os.lseek(self._fd, 0, SEEK_CUR)
end = os.fstat(self._fd).st_size
if end >= pos:
bufsize = end - pos + 1
except OSError:
pass
result = bytearray()
while True:
if len(result) >= bufsize:
bufsize = len(result)
bufsize += max(bufsize, DEFAULT_BUFFER_SIZE)
n = bufsize - len(result)
try:
chunk = os.read(self._fd, n)
except BlockingIOError:
if result:
break
return None
if not chunk: # reached the end of the file
break
result += chunk
return bytes(result)
def readinto(self, b):
"""Same as RawIOBase.readinto()."""
m = memoryview(b).cast('B')
data = self.read(len(m))
n = len(data)
m[:n] = data
return n
def write(self, b):
"""Write bytes b to file, return number written.
Only makes one system call, so not all of the data may be written.
The number of bytes actually written is returned. In non-blocking mode,
returns None if the write would block.
"""
self._checkClosed()
self._checkWritable()
try:
return os.write(self._fd, b)
except BlockingIOError:
return None
def seek(self, pos, whence=SEEK_SET):
"""Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values
are SEEK_CUR or 1 (move relative to current position, positive or negative),
and SEEK_END or 2 (move relative to end of file, usually negative, although
many platforms allow seeking beyond the end of a file).
Note that not all file objects are seekable.
"""
if isinstance(pos, float):
raise TypeError('an integer is required')
self._checkClosed()
return os.lseek(self._fd, pos, whence)
def tell(self):
"""tell() -> int. Current file position.
Can raise OSError for non seekable files."""
self._checkClosed()
return os.lseek(self._fd, 0, SEEK_CUR)
def truncate(self, size=None):
"""Truncate the file to at most size bytes.
Size defaults to the current file position, as returned by tell().
The current file position is changed to the value of size.
"""
self._checkClosed()
self._checkWritable()
if size is None:
size = self.tell()
os.ftruncate(self._fd, size)
return size
def close(self):
"""Close the file.
A closed file cannot be used for further I/O operations. close() may be
called more than once without error.
"""
if not self.closed:
try:
if self._closefd:
os.close(self._fd)
finally:
super().close()
def seekable(self):
"""True if file supports random-access."""
self._checkClosed()
if self._seekable is None:
try:
self.tell()
except OSError:
self._seekable = False
else:
self._seekable = True
return self._seekable
def readable(self):
"""True if file was opened in a read mode."""
self._checkClosed()
return self._readable
def writable(self):
"""True if file was opened in a write mode."""
self._checkClosed()
return self._writable
def fileno(self):
"""Return the underlying file descriptor (an integer)."""
self._checkClosed()
return self._fd
def isatty(self):
"""True if the file is connected to a TTY device."""
self._checkClosed()
return os.isatty(self._fd)
@property
def closefd(self):
"""True if the file descriptor will be closed by close()."""
return self._closefd
@property
def mode(self):
"""String giving the file mode"""
if self._created:
if self._readable:
return 'xb+'
else:
return 'xb'
elif self._appending:
if self._readable:
return 'ab+'
else:
return 'ab'
elif self._readable:
if self._writable:
return 'rb+'
else:
return 'rb'
else:
return 'wb'
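# Editor's illustrative sketch (not part of the original module): the
# mode-to-flags mapping above round-trips through the mode property.
import tempfile as _tempfile_demo
_demo_fd, _demo_path = _tempfile_demo.mkstemp()
os.close(_demo_fd)
_demo_fio = FileIO(_demo_path, 'ab+')  # append+read => O_APPEND|O_CREAT|O_RDWR
assert _demo_fio.mode == 'ab+'
_demo_fio.close()
os.remove(_demo_path)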
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, size=-1):
"""Read at most size characters from stream, where size is an int.
Read from underlying buffer until we have size characters or we hit EOF.
If size is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
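# Editor's illustrative sketch (not part of the original module): with
# translate=True, \r\n and \r collapse to \n, and seennl records which
# endings were actually encountered.
_demo_dec = IncrementalNewlineDecoder(decoder=None, translate=True)
assert _demo_dec.decode("a\r\nb\rc\n", final=True) == "a\nb\nc\n"
assert _demo_dec.newlines == ("\r", "\n", "\r\n")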
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the lines endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
# The write_through argument has no effect here since this
# implementation always writes through. The argument is present only
# so that the signature can match the signature of the C version.
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding(False)
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if not codecs.lookup(encoding)._is_text_encoding:
msg = ("%r is not a text encoding; "
"use codecs.open() to handle arbitrary codecs")
raise LookupError(msg % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
self._b2cratio = 0.0
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<{}.{}".format(self.__class__.__module__,
self.__class__.__qualname__)
try:
name = self.name
except Exception:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except Exception:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
decoded_chars = self._decoder.decode(input_chunk, eof)
self._set_decoded_chars(decoded_chars)
if decoded_chars:
self._b2cratio = len(input_chunk) / len(self._decoded_chars)
else:
self._b2cratio = 0.0
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
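# Editor's worked example (comment only): packing position=10, dec_flags=1
# and chars_to_skip=2 yields 10 | (1 << 64) | (2 << 192); _unpack_cookie
# then recovers each field with successive divmod(cookie, 1 << 64) calls.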
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise OSError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Fast search for an acceptable start point, close to our
# current pos.
# Rationale: calling decoder.decode() has a large overhead
# regardless of chunk size; we want the number of such calls to
# be O(1) in most situations (common decoders, non-crazy input).
# Actually, it will be exactly 1 for fixed-size codecs (all
# 8-bit codecs, also UTF-16 and UTF-32).
skip_bytes = int(self._b2cratio * chars_to_skip)
skip_back = 1
assert skip_bytes <= len(next_input)
while skip_bytes > 0:
decoder.setstate((b'', dec_flags))
# Decode up to the tentative start point
n = len(decoder.decode(next_input[:skip_bytes]))
if n <= chars_to_skip:
b, d = decoder.getstate()
if not b:
# Before pos and no bytes buffered in decoder => OK
dec_flags = d
chars_to_skip -= n
break
# Skip back by buffered amount and reset heuristic
skip_bytes -= len(b)
skip_back = 1
else:
# We're too far ahead, skip back a bit
skip_bytes -= skip_back
skip_back = skip_back * 2
else:
skip_bytes = 0
decoder.setstate((b'', dec_flags))
# Note our initial start point.
start_pos = position + skip_bytes
start_flags = dec_flags
if chars_to_skip == 0:
# We haven't moved from the start point.
return self._pack_cookie(start_pos, start_flags)
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
bytes_fed = 0
need_eof = 0
# Chars decoded since `start_pos`
chars_decoded = 0
for i in range(skip_bytes, len(next_input)):
bytes_fed += 1
chars_decoded += len(decoder.decode(next_input[i:i+1]))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise OSError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
def _reset_encoder(position):
"""Reset the encoder (merely useful for proper BOM handling)"""
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if position != 0:
encoder.setstate(0)
else:
encoder.reset()
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
_reset_encoder(position)
return position
if whence != 0:
raise ValueError("unsupported whence (%r)" % (whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise OSError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
_reset_encoder(cookie)
return cookie
def read(self, size=None):
self._checkReadable()
if size is None:
size = -1
decoder = self._decoder or self._get_decoder()
try:
size.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if size < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have size characters to return.
eof = False
result = self._get_decoded_chars(size)
while len(result) < size and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(size - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, size=None):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError("size must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if size >= 0 and len(line) >= size:
endpos = size # reached length size
break
# No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if size >= 0 and endpos > size:
endpos = size # don't exceed size
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
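# Editor's illustrative sketch (not part of the original module):
# universal-newline reading over a binary buffer, via the stdlib io
# equivalents of the classes above.
import io as _io_demo_t
_demo_t = _io_demo_t.TextIOWrapper(_io_demo_t.BytesIO(b"a\r\nb\rc"), encoding="ascii")
assert _demo_t.read() == "a\nb\nc"
assert _demo_t.newlines == ("\r", "\r\n")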
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
The initial_value argument sets the value of object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="surrogatepass",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
decoder = self._decoder or self._get_decoder()
old_state = decoder.getstate()
decoder.reset()
try:
return decoder.decode(self.buffer.getvalue(), final=True)
finally:
decoder.setstate(old_state)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
# that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
|
tellesnobrega/horizon
|
refs/heads/master
|
horizon/decorators.py
|
85
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
General-purpose decorators for use with Horizon.
"""
import functools
from django.utils.decorators import available_attrs # noqa
from django.utils.translation import ugettext_lazy as _
def _current_component(view_func, dashboard=None, panel=None):
"""Sets the currently-active dashboard and/or panel on the request."""
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if dashboard:
request.horizon['dashboard'] = dashboard
if panel:
request.horizon['panel'] = panel
return view_func(request, *args, **kwargs)
return dec
def require_auth(view_func):
"""Performs user authentication check.
Similar to Django's `login_required` decorator, except that this throws
:exc:`~horizon.exceptions.NotAuthenticated` exception if the user is not
signed-in.
"""
from horizon.exceptions import NotAuthenticated # noqa
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
raise NotAuthenticated(_("Please log in to continue."))
return dec
def require_perms(view_func, required):
"""Enforces permission-based access controls.
:param list required: A list of permission names, all of which the request
user must possess in order to access the decorated view.
Example usage::
from horizon.decorators import require_perms
@require_perms(['foo.admin', 'foo.member'])
def my_view(request):
...
Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the
requirements are not met.
"""
from horizon.exceptions import NotAuthorized # noqa
# We only need to check each permission once for a view, so we'll use a set
current_perms = getattr(view_func, '_required_perms', set([]))
view_func._required_perms = current_perms | set(required)
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if request.user.is_authenticated():
if request.user.has_perms(view_func._required_perms):
return view_func(request, *args, **kwargs)
raise NotAuthorized(_("You are not authorized to access %s")
% request.path)
# If we don't have any permissions, just return the original view.
if required:
return dec
else:
return view_func
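# Editor's illustrative sketch (not part of the module; the names below
# are hypothetical): stacking the two decorators on a plain view function.
_demo_view = lambda request: 'ok'
_demo_protected = require_auth(require_perms(_demo_view, ['foo.admin']))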
|
Boldie/gourmet
|
refs/heads/master
|
gourmet/shopEditor.py
|
6
|
import os
import gtk, gobject, backends.db, re, pickle
from gglobals import uibase
from gtk_extras import WidgetSaver
from gtk_extras import cb_extras as cb
from gtk_extras import dialog_extras as de
class ShopEditor:
"""ShopEditor sets up a GUI to allow editing which shopping
categories correspond to which items throughout the recipe
database. It is useful for corrections or changes to category info
en masse and for reordering shopping categories."""
def __init__ (self, rd=backends.db.recipeManager(), rg=None):
self.ui = gtk.Builder()
self.ui.add_from_file(os.path.join(uibase,'shopCatEditor.ui'))
self.rd = rd
self.rg = rg
self.prefs = self.rg.prefs
self.widget_names = ['treeview', 'searchByBox', 'searchEntry', 'searchButton', 'window',
'searchAsYouTypeToggle', 'regexpTog', 'deleteCatButton', 'addCatEntry',
'addCatButton']
for w in self.widget_names:
setattr(self,w,self.ui.get_object(w))
# setup entry callback to sensitize/desensitize apply
self.addCatButton.set_sensitive(False)
self.addCatEntry.connect('changed',self.addCatEntryChangedCB)
self.makeTreeModel()
self.search_string=""
self.treeModel.set_default_sort_func(self.sort_model_fun)
self.treeModel.set_sort_column_id(-1,gtk.SORT_ASCENDING)
self.filteredModel = self.treeModel.filter_new()
self.filteredModel.set_visible_func(self.filter_visibility_fun)
self.setupTreeView()
self.treeview.set_model(self.filteredModel)
self.treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
#self.treeview.set_model(self.treeModel)
self.ui.connect_signals({
'iSearch':self.isearchCB,
'search':self.searchCB,
'search_as_you_type_toggle':self.search_as_you_typeCB,
'close_window': lambda *args: (self.window.hide(), self.window.destroy()),  # hide() returns None, so chaining with 'and' would skip destroy()
'catUp':self.catUpCB,
'catDown':self.catDownCB,
'catTop':self.catTopCB,
'catBottom':self.catBottomCB,
})
# to set our regexp_toggled variable
self.searchByBox.set_active(0)
self.dont_ask = False
# setup WidgetSavers
self.rg.conf.append(WidgetSaver.WidgetSaver(
self.searchAsYouTypeToggle,
self.prefs.get('sautTog',
{'active':self.searchAsYouTypeToggle.get_active()}),
['toggled']))
self.rg.conf.append(WidgetSaver.WidgetSaver(
self.regexpTog,
self.prefs.get('regexpTog',
{'active':self.regexpTog.get_active()}),
['toggled']))
def dont_ask_cb (self, widget, *args):
self.dont_ask=widget.get_active()
def sort_model_fun (self, model, iter1, iter2, data=None):
c1 = model.get_value(iter1, self.CAT_COL)
if self.rg.sl.sh.catorder_dic.has_key(c1):
c1_order = self.rg.sl.sh.catorder_dic[c1]
else:
c1_order = None
c2 = model.get_value(iter2, self.CAT_COL)
if self.rg.sl.sh.catorder_dic.has_key(c2):
c2_order = self.rg.sl.sh.catorder_dic[c2]
else:
c2_order = None
if c1_order and c2_order:
compare = c1_order > c2_order
elif c1 and c2:
compare = c1 > c2
else:
k1 = model.get_value(iter1, self.KEY_COL)
k2 = model.get_value(iter2, self.KEY_COL)
if k1 and k2:
compare = k1 > k2
else:
compare = 0
# iter1 and iter2 are equal
if compare==0: return 0
# c1 sorts after c2, so iter2 precedes iter1
if compare: return 1
# otherwise iter1 precedes iter2
else: return -1
def filter_visibility_fun (self, mod, iter):
if not self.search_string:
return True
str = mod.get_value(iter,self.search_by)
if not str and self.search_by==self.KEY_COL:
# then we need to make sure we show key header rows
# whose items include an item w/ the proper title...
cat = mod.get_value(iter,self.CAT_COL)
if self.cat_to_key.has_key(cat):
for itm in self.cat_to_key[cat]:
if self.use_regexp:
if re.search(self.search_string, itm): return True
elif itm.find(self.search_string) >= 0: return True
if self.use_regexp:
if re.search(self.search_string, str): return True
else:
if str.find(self.search_string) >= 0:
return True
def setupTreeView (self):
self.CAT_COL = 1
self.KEY_COL = 2
for n,head in [[self.CAT_COL,'Category'],
[self.KEY_COL,'Key'],
]:
renderer = gtk.CellRendererText()
renderer.set_property('editable',True)
renderer.connect('edited',self.tree_edited,n,head)
col = gtk.TreeViewColumn(head, renderer, text=n)
col.set_resizable(True)
self.treeview.append_column(col)
self.treeview.connect('row-expanded',self.populateChild)
def tree_edited (self, renderer, path_string, text, n, head):
indices = path_string.split(':')
path = tuple( map(int, indices))
iter = self.filteredModel.convert_iter_to_child_iter(self.filteredModel.get_iter(path))
key = self.treeModel.get_value(iter, self.KEY_COL)
item = self.treeModel.get_value(iter, self.ITEM_COL)
children = self.treeModel.iter_children(iter)
if n==self.KEY_COL and key==text: return
if n==self.ITEM_COL and item==text: return
## make sure they want to make this change
if not self.dont_ask:
msg = "Are you sure you want to change the "
if n==self.KEY_COL: msg += 'key'
if n==self.ITEM_COL: msg += 'item'
if item:
msg += "for \"%s from \"%s\""%(item,key)
else:
msg += " from \"%s\" "%key
msg += " to \"%s\""%text
if not de.getBoolean(label=msg,
dont_ask_cb=self.dont_ask_cb,
dont_ask_custom_text="Don't ask me before changing keys and items."):
return
if children and n==self.KEY_COL:
self.change_children(key, text, iter)
else:
if n==self.KEY_COL:
self.changeItem(key, item, new_key=text)
elif n==self.ITEM_COL:
self.changeItem(key, item, new_item=text)
self.treeModel.set_value(iter, n, text)
def change_children (self, key, new_key, iter):
# if it's children, it means we're changing a key for
# all cases... and then we just have to change the model
# so our user knows it worked
self.changeItem(key, new_key=new_key)
nn = 0
child = self.treeModel.iter_nth_child(iter,nn)
while child:
self.treeModel.set_value(child, self.KEY_COL, new_key)
nn += 1
child = self.treeModel.iter_nth_child(iter,nn)
def changeItem (self, key, item=None, new_key=None, new_item=None):
if item:
vw=self.rd.ingredients_table.select(key=key,item=item)
else:
vw=self.rd.ingredients_table.select(key=key)
for i in vw:
if new_key:
i.ingkey=new_key
self.rd.changed=True
if new_item:
i.item=new_item
self.rd.changed=True
def makeTreeModel (self):
self.treeModel = gtk.TreeStore(gobject.TYPE_PYOBJECT, str, str)
unique_cat_vw = self.rd.shopcats_table.groupby(self.rd.shopcats_table.category, 'groupvw')
self.cat_to_key={}
for c in unique_cat_vw:
iter=self.treeModel.append(None,[c, pickle.loads(c.category), ""])
self.cat_to_key[pickle.loads(c.category)]=[]
for i in c.groupvw:
#self.treeModel.append(iter,[i,pickle.loads(c.category),i.ingkey])
self.treeModel.append(iter,[i,pickle.loads(c.category),i.ingkey])
self.cat_to_key[pickle.loads(c.category)].append(i.ingkey)
def populateChild (self, tv, iter, path):
iter = self.filteredModel.convert_iter_to_child_iter(iter)
n = 0
child = self.treeModel.iter_nth_child(iter,n)
while child:
i = self.treeModel.get_value(child,0)
recipes = ""
for ii in i.itemgroup:
id = ii.id
r = self.rd.get_rec(ii.id)
if r: recipes += ", %s"%r.title
recipes = recipes[2:] # strip the leading ", "
self.treeModel.set_value(child, 4, recipes)
n += 1
child = self.treeModel.iter_nth_child(iter,n)
def doSearch (self):
"""Do the actual searching."""
self.search_string = self.searchEntry.get_text()
search_by_str = cb.cb_get_active_text(self.searchByBox)
self.use_regexp = self.regexpTog.get_active()
if search_by_str == 'Key':
self.search_by = self.KEY_COL
else:
#print self.treeModel[-1][self.ITEM_COL]
self.search_by = self.CAT_COL
self.filteredModel.refilter()
def isearchCB (self, *args):
if self.searchAsYouTypeToggle.get_active():
self.doSearch()
def searchCB (self, *args):
self.doSearch()
def search_as_you_typeCB (self, *args):
if self.searchAsYouTypeToggle.get_active():
self.searchButton.hide()
else: self.searchButton.show()
def addCatEntryChangedCB (self, *args):
if self.addCatEntry.get_text():
self.addCatButton.set_sensitive(True)
else: self.addCatButton.set_sensitive(False)
def catUpCB (self, *args):
pass
def catDownCB (self, *args):
pass
def catTopCB (self, *args):
pass
def catBottomCB (self, *args):
pass
if __name__ == '__main__':
ke=ShopEditor()
gtk.main()
|
krishna-pandey-git/django
|
refs/heads/master
|
tests/gis_tests/geoadmin/models.py
|
300
|
from django.contrib.gis.gdal import HAS_GDAL
from django.utils.encoding import python_2_unicode_compatible
from ..admin import admin
from ..models import models
@python_2_unicode_compatible
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
class Meta:
app_label = 'geoadmin'
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
site = admin.AdminSite(name='admin_gis')
if HAS_GDAL:
site.register(City, admin.OSMGeoAdmin)
|
MobileWebApps/backend-python-rest-gae
|
refs/heads/master
|
django_filters/filters.py
|
6
|
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import timedelta
from django import forms
from django.db.models import Q
from django.db.models.sql.constants import QUERY_TERMS
from django.utils import six
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from .fields import RangeField, LookupTypeField
__all__ = [
'Filter', 'CharFilter', 'BooleanFilter', 'ChoiceFilter',
'MultipleChoiceFilter', 'DateFilter', 'DateTimeFilter', 'TimeFilter',
'ModelChoiceFilter', 'ModelMultipleChoiceFilter', 'NumberFilter',
'RangeFilter', 'DateRangeFilter', 'AllValuesFilter',
]
LOOKUP_TYPES = sorted(QUERY_TERMS)
class Filter(object):
creation_counter = 0
field_class = forms.Field
def __init__(self, name=None, label=None, widget=None, action=None,
lookup_type='exact', required=False, distinct=False, **kwargs):
self.name = name
self.label = label
if action:
self.filter = action
self.lookup_type = lookup_type
self.widget = widget
self.required = required
self.extra = kwargs
self.distinct = distinct
self.creation_counter = Filter.creation_counter
Filter.creation_counter += 1
@property
def field(self):
if not hasattr(self, '_field'):
if (self.lookup_type is None or
isinstance(self.lookup_type, (list, tuple))):
if self.lookup_type is None:
lookup = [(x, x) for x in LOOKUP_TYPES]
else:
lookup = [
(x, x) for x in LOOKUP_TYPES if x in self.lookup_type]
self._field = LookupTypeField(self.field_class(
required=self.required, widget=self.widget, **self.extra),
lookup, required=self.required, label=self.label)
else:
self._field = self.field_class(required=self.required,
label=self.label, widget=self.widget, **self.extra)
return self._field
def filter(self, qs, value):
if isinstance(value, (list, tuple)):
lookup = six.text_type(value[1])
if not lookup:
lookup = 'exact' # fallback to exact if lookup is not provided
value = value[0]
else:
lookup = self.lookup_type
if value in ([], (), {}, None, ''):
return qs
qs = qs.filter(**{'%s__%s' % (self.name, lookup): value})
if self.distinct:
qs = qs.distinct()
return qs
class CharFilter(Filter):
field_class = forms.CharField
class BooleanFilter(Filter):
field_class = forms.NullBooleanField
def filter(self, qs, value):
if value is not None:
return qs.filter(**{self.name: value})
return qs
class ChoiceFilter(Filter):
field_class = forms.ChoiceField
class MultipleChoiceFilter(Filter):
"""
This filter performs an OR query on the selected options.
"""
field_class = forms.MultipleChoiceField
def filter(self, qs, value):
value = value or ()
if len(value) == len(self.field.choices):
return qs
q = Q()
for v in value:
q |= Q(**{self.name: v})
return qs.filter(q).distinct()
class DateFilter(Filter):
field_class = forms.DateField
class DateTimeFilter(Filter):
field_class = forms.DateTimeField
class TimeFilter(Filter):
field_class = forms.TimeField
class ModelChoiceFilter(Filter):
field_class = forms.ModelChoiceField
class ModelMultipleChoiceFilter(MultipleChoiceFilter):
field_class = forms.ModelMultipleChoiceField
class NumberFilter(Filter):
field_class = forms.DecimalField
class RangeFilter(Filter):
field_class = RangeField
def filter(self, qs, value):
if value:
lookup = '%s__range' % self.name
return qs.filter(**{lookup: (value.start, value.stop)})
return qs
_truncate = lambda dt: dt.replace(hour=0, minute=0, second=0)
class DateRangeFilter(ChoiceFilter):
options = {
'': (_('Any date'), lambda qs, name: qs.all()),
1: (_('Today'), lambda qs, name: qs.filter(**{
'%s__year' % name: now().year,
'%s__month' % name: now().month,
'%s__day' % name: now().day
})),
2: (_('Past 7 days'), lambda qs, name: qs.filter(**{
'%s__gte' % name: _truncate(now() - timedelta(days=7)),
'%s__lt' % name: _truncate(now() + timedelta(days=1)),
})),
3: (_('This month'), lambda qs, name: qs.filter(**{
'%s__year' % name: now().year,
'%s__month' % name: now().month
})),
4: (_('This year'), lambda qs, name: qs.filter(**{
'%s__year' % name: now().year,
})),
}
def __init__(self, *args, **kwargs):
kwargs['choices'] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super(DateRangeFilter, self).__init__(*args, **kwargs)
def filter(self, qs, value):
try:
value = int(value)
except (ValueError, TypeError):
value = ''
return self.options[value][1](qs, self.name)
class AllValuesFilter(ChoiceFilter):
@property
def field(self):
qs = self.model._default_manager.distinct()
qs = qs.order_by(self.name).values_list(self.name, flat=True)
self.extra['choices'] = [(o, o) for o in qs]
return super(AllValuesFilter, self).field
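# Editor's illustrative sketch (comment only; ``Book`` is a hypothetical
# model): a CharFilter bound to a field name translates directly into an
# ORM lookup, per Filter.filter() above.
#
#     f = CharFilter(name='title', lookup_type='icontains')
#     qs = f.filter(Book.objects.all(), 'django')
#     # equivalent to Book.objects.filter(title__icontains='django')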
|
CI-WATER/TethysCluster
|
refs/heads/master
|
tethyscluster/tests/templates/__init__.py
|
2
|
# Copyright 2009-2014 Justin Riley
#
# This file is part of TethysCluster.
#
# TethysCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# TethysCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with TethysCluster. If not, see <http://www.gnu.org/licenses/>.
|
mrry/tensorflow
|
refs/heads/windows
|
tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py
|
4
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + self._data.keys())
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
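# Editor's illustrative sketch (not in the original file): constructing a
# PandasSource. The DataFrame and its column names are hypothetical, and
# pandas is assumed to be installed.
def _example_pandas_source():
  import pandas as pd
  df = pd.DataFrame({"height": [1.0, 2.0], "weight": [10.0, 20.0]})
  source = PandasSource(df, batch_size=2, shuffle=False)
  # One output Series per DataFrame column, plus the reserved "index" column.
  assert source._output_names == ("index", "height", "weight")
  return source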
|
potatolondon/django-nonrel-1-4
|
refs/heads/master
|
django/contrib/localflavor/de/__init__.py
|
12133432
| |
350dotorg/Django
|
refs/heads/master
|
tests/regressiontests/cache/__init__.py
|
12133432
| |
Venturi/oldcms
|
refs/heads/master
|
env/lib/python2.7/site-packages/cms/test_utils/project/fakemlng/south_migrations/__init__.py
|
12133432
| |
abhiii5459/sympy
|
refs/heads/master
|
sympy/printing/pretty/tests/__init__.py
|
12133432
| |
resmo/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/aci/__init__.py
|
12133432
| |
tiagochiavericosta/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/views/component.py
|
57
|
from __future__ import absolute_import
import json
import logging
from django.http import HttpResponseBadRequest, Http404
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_GET
from django.core.exceptions import PermissionDenied
from django.conf import settings
from opaque_keys import InvalidKeyError
from xmodule.modulestore.exceptions import ItemNotFoundError
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from xblock.core import XBlock
from xblock.django.request import webob_to_django_response, django_to_webob_request
from xblock.exceptions import NoSuchHandlerError
from xblock.fields import Scope
from xblock.plugin import PluginMissingError
from xblock.runtime import Mixologist
from contentstore.utils import get_lms_link_for_item
from contentstore.views.helpers import get_parent_xblock, is_unit, xblock_type_display_name
from contentstore.views.item import create_xblock_info, add_container_page_publishing_info
from opaque_keys.edx.keys import UsageKey
from student.auth import has_course_author_access
from django.utils.translation import ugettext as _
from models.settings.course_grading import CourseGradingModel
__all__ = ['OPEN_ENDED_COMPONENT_TYPES',
'ADVANCED_COMPONENT_POLICY_KEY',
'container_handler',
'component_handler'
]
log = logging.getLogger(__name__)
# NOTE: it is assumed that this list is disjoint from ADVANCED_COMPONENT_TYPES
COMPONENT_TYPES = ['discussion', 'html', 'problem', 'video']
# Constants for determining if these components should be enabled for this course
SPLIT_TEST_COMPONENT_TYPE = 'split_test'
OPEN_ENDED_COMPONENT_TYPES = ["combinedopenended", "peergrading"]
NOTE_COMPONENT_TYPES = ['notes']
if settings.FEATURES.get('ALLOW_ALL_ADVANCED_COMPONENTS'):
ADVANCED_COMPONENT_TYPES = sorted(set(name for name, class_ in XBlock.load_classes()) - set(COMPONENT_TYPES))
else:
ADVANCED_COMPONENT_TYPES = settings.ADVANCED_COMPONENT_TYPES
ADVANCED_COMPONENT_CATEGORY = 'advanced'
ADVANCED_COMPONENT_POLICY_KEY = 'advanced_modules'
ADVANCED_PROBLEM_TYPES = settings.ADVANCED_PROBLEM_TYPES
CONTAINER_TEMPLATES = [
"basic-modal", "modal-button", "edit-xblock-modal",
"editor-mode-button", "upload-dialog",
"add-xblock-component", "add-xblock-component-button", "add-xblock-component-menu",
"add-xblock-component-menu-problem", "xblock-string-field-editor", "publish-xblock", "publish-history",
"unit-outline", "container-message", "license-selector",
]
def _advanced_component_types():
"""
Return advanced component types which can be created.
"""
return [c_type for c_type in ADVANCED_COMPONENT_TYPES if c_type not in settings.DEPRECATED_ADVANCED_COMPONENT_TYPES]
@require_GET
@login_required
def subsection_handler(request, usage_key_string):
"""
The restful handler for subsection-specific requests.
GET
html: return html page for editing a subsection
json: not currently supported
"""
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
usage_key = UsageKey.from_string(usage_key_string)
try:
course, item, lms_link, preview_link = _get_item_in_course(request, usage_key)
except ItemNotFoundError:
return HttpResponseBadRequest()
# make sure that location references a 'sequential', otherwise return
# BadRequest
if item.location.category != 'sequential':
return HttpResponseBadRequest()
parent = get_parent_xblock(item)
# remove all metadata from the generic dictionary that is presented in a
        # more normalized UI. We only want to display the XBlock's fields, not
# the fields from any mixins that have been added
fields = getattr(item, 'unmixed_class', item.__class__).fields
policy_metadata = dict(
(field.name, field.read_from(item))
for field
in fields.values()
if field.name not in ['display_name', 'start', 'due', 'format'] and field.scope == Scope.settings
)
        subsection_units = item.get_children()
        can_view_live = any(modulestore().has_published_version(unit) for unit in subsection_units)
return render_to_response(
'edit_subsection.html',
{
'subsection': item,
'context_course': course,
'new_unit_category': 'vertical',
'lms_link': lms_link,
'preview_link': preview_link,
'course_graders': json.dumps(CourseGradingModel.fetch(item.location.course_key).graders),
'parent_item': parent,
'locator': item.location,
'policy_metadata': policy_metadata,
'subsection_units': subsection_units,
'can_view_live': can_view_live
}
)
else:
return HttpResponseBadRequest("Only supports html requests")
def _load_mixed_class(category):
"""
Load an XBlock by category name, and apply all defined mixins
"""
component_class = XBlock.load_class(category, select=settings.XBLOCK_SELECT_FUNCTION)
mixologist = Mixologist(settings.XBLOCK_MIXINS)
return mixologist.mix(component_class)
# pylint: disable=unused-argument
@require_GET
@login_required
def container_handler(request, usage_key_string):
"""
The restful handler for container xblock requests.
GET
html: returns the HTML page for editing a container
json: not currently supported
"""
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
try:
usage_key = UsageKey.from_string(usage_key_string)
except InvalidKeyError: # Raise Http404 on invalid 'usage_key_string'
raise Http404
with modulestore().bulk_operations(usage_key.course_key):
try:
course, xblock, lms_link, preview_lms_link = _get_item_in_course(request, usage_key)
except ItemNotFoundError:
return HttpResponseBadRequest()
component_templates = get_component_templates(course)
ancestor_xblocks = []
parent = get_parent_xblock(xblock)
action = request.REQUEST.get('action', 'view')
is_unit_page = is_unit(xblock)
unit = xblock if is_unit_page else None
while parent and parent.category != 'course':
if unit is None and is_unit(parent):
unit = parent
ancestor_xblocks.append(parent)
parent = get_parent_xblock(parent)
ancestor_xblocks.reverse()
assert unit is not None, "Could not determine unit page"
subsection = get_parent_xblock(unit)
assert subsection is not None, "Could not determine parent subsection from unit " + unicode(unit.location)
section = get_parent_xblock(subsection)
assert section is not None, "Could not determine ancestor section from unit " + unicode(unit.location)
# Fetch the XBlock info for use by the container page. Note that it includes information
# about the block's ancestors and siblings for use by the Unit Outline.
xblock_info = create_xblock_info(xblock, include_ancestor_info=is_unit_page)
if is_unit_page:
add_container_page_publishing_info(xblock, xblock_info)
# need to figure out where this item is in the list of children as the
# preview will need this
index = 1
for child in subsection.get_children():
if child.location == unit.location:
break
index += 1
return render_to_response('container.html', {
'context_course': course, # Needed only for display of menus at top of page.
'action': action,
'xblock': xblock,
'xblock_locator': xblock.location,
'unit': unit,
'is_unit_page': is_unit_page,
'subsection': subsection,
'section': section,
'new_unit_category': 'vertical',
'ancestor_xblocks': ancestor_xblocks,
'component_templates': json.dumps(component_templates),
'xblock_info': xblock_info,
'draft_preview_link': preview_lms_link,
'published_preview_link': lms_link,
'templates': CONTAINER_TEMPLATES
})
else:
return HttpResponseBadRequest("Only supports HTML requests")
def get_component_templates(courselike, library=False):
"""
Returns the applicable component templates that can be used by the specified course or library.
"""
def create_template_dict(name, cat, boilerplate_name=None, tab="common", hinted=False):
"""
Creates a component template dict.
Parameters
            name: the user-visible name of the component
            cat: the type of component (problem, html, etc.)
boilerplate_name: name of boilerplate for filling in default values. May be None.
hinted: True if hinted problem else False
tab: common(default)/advanced, which tab it goes in
"""
return {
"display_name": name,
"category": cat,
"boilerplate_name": boilerplate_name,
"hinted": hinted,
"tab": tab
}
component_display_names = {
'discussion': _("Discussion"),
'html': _("HTML"),
'problem': _("Problem"),
'video': _("Video")
}
component_templates = []
categories = set()
# The component_templates array is in the order of "advanced" (if present), followed
# by the components in the order listed in COMPONENT_TYPES.
component_types = COMPONENT_TYPES[:]
# Libraries do not support discussions
if library:
component_types = [component for component in component_types if component != 'discussion']
for category in component_types:
templates_for_category = []
component_class = _load_mixed_class(category)
# add the default template with localized display name
# TODO: Once mixins are defined per-application, rather than per-runtime,
# this should use a cms mixed-in class. (cpennington)
display_name = xblock_type_display_name(category, _('Blank')) # this is the Blank Advanced problem
templates_for_category.append(create_template_dict(display_name, category, None, 'advanced'))
categories.add(category)
# add boilerplates
if hasattr(component_class, 'templates'):
for template in component_class.templates():
filter_templates = getattr(component_class, 'filter_templates', None)
if not filter_templates or filter_templates(template, courselike):
# Tab can be 'common' 'advanced'
# Default setting is common/advanced depending on the presence of markdown
tab = 'common'
if template['metadata'].get('markdown') is None:
tab = 'advanced'
hinted = template.get('hinted', False)
templates_for_category.append(
create_template_dict(
_(template['metadata'].get('display_name')), # pylint: disable=translation-of-non-string
category,
template.get('template_id'),
tab,
hinted,
)
)
# Add any advanced problem types
if category == 'problem':
for advanced_problem_type in ADVANCED_PROBLEM_TYPES:
component = advanced_problem_type['component']
boilerplate_name = advanced_problem_type['boilerplate_name']
try:
component_display_name = xblock_type_display_name(component)
except PluginMissingError:
log.warning('Unable to load xblock type %s to read display_name', component, exc_info=True)
else:
templates_for_category.append(
create_template_dict(component_display_name, component, boilerplate_name, 'advanced')
)
categories.add(component)
component_templates.append({
"type": category,
"templates": templates_for_category,
"display_name": component_display_names[category]
})
# Libraries do not support advanced components at this time.
if library:
return component_templates
# Check if there are any advanced modules specified in the course policy.
# These modules should be specified as a list of strings, where the strings
# are the names of the modules in ADVANCED_COMPONENT_TYPES that should be
# enabled for the course.
course_advanced_keys = courselike.advanced_modules
advanced_component_templates = {"type": "advanced", "templates": [], "display_name": _("Advanced")}
advanced_component_types = _advanced_component_types()
# Set component types according to course policy file
if isinstance(course_advanced_keys, list):
for category in course_advanced_keys:
if category in advanced_component_types and category not in categories:
# boilerplates not supported for advanced components
try:
component_display_name = xblock_type_display_name(category, default_display_name=category)
advanced_component_templates['templates'].append(
create_template_dict(
component_display_name,
category
)
)
categories.add(category)
except PluginMissingError:
# dhm: I got this once but it can happen any time the
# course author configures an advanced component which does
# not exist on the server. This code here merely
# prevents any authors from trying to instantiate the
# non-existent component type by not showing it in the menu
log.warning(
"Advanced component %s does not exist. It will not be added to the Studio new component menu.",
category
)
else:
log.error(
"Improper format for course advanced keys! %s",
course_advanced_keys
)
if len(advanced_component_templates['templates']) > 0:
component_templates.insert(0, advanced_component_templates)
return component_templates
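# Editor's note (not in the original file): an illustrative, hypothetical shape
# of the list returned above -- "advanced" first when the course enables
# advanced modules, then one entry per type in COMPONENT_TYPES:
#
#     [
#         {"type": "advanced", "display_name": "Advanced", "templates": [...]},
#         {"type": "discussion", "display_name": "Discussion",
#          "templates": [{"display_name": "Discussion", "category": "discussion",
#                         "boilerplate_name": None, "hinted": False, "tab": "common"}]},
#         ...
#     ]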
@login_required
def _get_item_in_course(request, usage_key):
"""
Helper method for getting the old location, containing course,
item, lms_link, and preview_lms_link for a given locator.
Verifies that the caller has permission to access this item.
"""
# usage_key's course_key may have an empty run property
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
course_key = usage_key.course_key
if not has_course_author_access(request.user, course_key):
raise PermissionDenied()
course = modulestore().get_course(course_key)
item = modulestore().get_item(usage_key, depth=1)
lms_link = get_lms_link_for_item(item.location)
preview_lms_link = get_lms_link_for_item(item.location, preview=True)
return course, item, lms_link, preview_lms_link
@login_required
def component_handler(request, usage_key_string, handler, suffix=''):
"""
Dispatch an AJAX action to an xblock
Args:
        usage_key_string: The usage id of the block to dispatch to
handler (str): The handler to execute
suffix (str): The remainder of the url to be passed to the handler
Returns:
:class:`django.http.HttpResponse`: The response from the handler, converted to a
django response
"""
usage_key = UsageKey.from_string(usage_key_string)
descriptor = modulestore().get_item(usage_key)
# Let the module handle the AJAX
req = django_to_webob_request(request)
try:
resp = descriptor.handle(handler, req, suffix)
except NoSuchHandlerError:
log.info("XBlock %s attempted to access missing handler %r", descriptor, handler, exc_info=True)
raise Http404
# unintentional update to handle any side effects of handle call
# could potentially be updating actual course data or simply caching its values
modulestore().update_item(descriptor, request.user.id)
return webob_to_django_response(resp)
|
brayden2544/Mystuff-final
|
refs/heads/master
|
manage/cached_templates/templates/base.htm.py
|
1
|
# -*- coding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1397177451.490061
_enable_loop = True
_template_filename = 'C:\\app\\manage\\templates/base.htm'
_template_uri = 'base.htm'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['left']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_template.htm', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
def left():
return render_left(context._locals(__M_locals))
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer('\n\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'left'):
context['self'].left(**pageargs)
# SOURCE LINE 163
__M_writer('\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_left(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
def left():
return render_left(context)
__M_writer = context.writer()
# SOURCE LINE 3
__M_writer('\n <div class="panel-group" id="accordion">\n <div class="panel panel-default">\n <div class="panel-heading">\n <h4 class="panel-title">\n <a data-toggle="collapse" data-parent="#accordion" href="#collapseOne">\n <span class="glyphicon glyphicon-th-list"></span>   Stores \n </a>\n </h4>\n </div>\n <div id="collapseOne" class="panel-collapse collapse">\n <div class="panel-body">\n <a href="/manage/stores/"><button type="button" class="btn btn-default btn-block">View All Stores</button></a>\n \t\t\t\t<a href="/manage/edit_stores__new/"><button type="button" class="btn btn-default btn-block">Create New Store</button></a>\n </div>\n </div>\n </div>\n <div class="panel panel-default">\n <div class="panel-heading">\n <h4 class="panel-title">\n <a data-toggle="collapse" data-parent="#accordion" href="#collapseTwo">\n <span class="glyphicon glyphicon-tags"></span>   Categories\n </a>\n </h4>\n </div>\n <div id="collapseTwo" class="panel-collapse collapse">\n <div class="panel-body">\n <a href="/manage/category/"><button type="button" class="btn btn-default btn-block">View All Categories</button></a>\n \t\t\t\t<a href="/manage/edit_category__new/"><button type="button" class="btn btn-default btn-block">Add New Category</button></a>\n </div>\n </div>\n </div>\n <div class="panel panel-default">\n <div class="panel-heading">\n <h4 class="panel-title">\n <a data-toggle="collapse" data-parent="#accordion" href="#collapseThree">\n <span class="glyphicon glyphicon-certificate"></span>   Brands\n </a>\n </h4>\n </div>\n <div id="collapseThree" class="panel-collapse collapse">\n <div class="panel-body">\n <a href="/manage/brand/"><button type="button" class="btn btn-default btn-block">View All Brands</button></a>\n \t\t\t\t<a href="/manage/edit_brand__new/"><button type="button" class="btn btn-default btn-block">Add New Brand</button></a>\n </div>\n </div>\n </div>\n <div class="panel panel-default">\n <div class="panel-heading">\n <h4 class="panel-title">\n <a data-toggle="collapse" data-parent="#accordion" href="#collapseFour">\n <span class="glyphicon glyphicon-tag"></span>   Products\n </a>\n </h4>\n </div>\n <div id="collapseFour" class="panel-collapse collapse">\n <div class="panel-body">\n <a href="/manage/catalog_items/"><button type="button" class="btn btn-default btn-block">View All Catalog Items</button></a>\n \t\t\t\t<a href="/manage/edit_catalog_items__new/"><button type="button" class="btn btn-default btn-block">Add New Catalog Item</button></a>\n \t\t\t\t<a href="/manage/serial_inventory/"><button type="button" class="btn btn-default btn-block">View Serial Inventory</button></a>\n \t\t\t\t<a href="/manage/edit_serial_inventory__new/"><button type="button" class="btn btn-default btn-block">Add Serial Item</button></a>\n </div>\n </div>\n </div>\n <div class="panel panel-default">\n <div class="panel-heading">\n <h4 class="panel-title">\n <a data-toggle="collapse" data-parent="#accordion" href="#collapseFive">\n <span class="glyphicon glyphicon-list-alt"></span>   Inventory \n </a>\n </h4>\n </div>\n <div id="collapseFive" class="panel-collapse collapse">\n <div class="panel-body">\n <a href="/manage/inventory/"><button type="button" class="btn btn-default btn-block">View Inventory</button></a>\n \t\t\t\t<a href="/manage/add_inventory/"><button type="button" class="btn btn-default btn-block">Add Inventory by SKU</button></a>\n </div>\n </div>\n </div>\n <div class="panel panel-default">\n <div class="panel-heading">\n <h4 class="panel-title">\n <a data-toggle="collapse" 
data-parent="#accordion" href="#collapseSix">\n <span class="glyphicon glyphicon-time"></span>   Rentals \n </a>\n </h4>\n </div>\n <div id="collapseSix" class="panel-collapse collapse">\n <div class="panel-body">\n <a href="/manage/rental/"><button type="button" class="btn btn-default btn-block">Current Rentals</button></a>\n \t\t\t\t<a href="/manage/overdue_rental/"><button type="button" class="btn btn-default btn-block">Overdue Rentals</button></a>\n </div>\n </div>\n </div>\n <div class="panel panel-default">\n <div class="panel-heading">\n <h4 class="panel-title">\n <a data-toggle="collapse" data-parent="#accordion" href="#collapseSeven">\n <span class="glyphicon glyphicon-wrench"></span>   Repairs\n </a>\n </h4>\n </div>\n <div id="collapseSeven" class="panel-collapse collapse">\n <div class="panel-body">\n <a href="/manage/create_repair/"><button type="button" class="btn btn-default btn-block">Check In New Repair</button></a>\n <a href="/manage/repairs/"><button type="button" class="btn btn-default btn-block">View Current Repairs</button></a>\n \t\t\t\t <a href="/manage/rental/"><button type="button" class="btn btn-default btn-block">Change Repair Status</button></a>\n </div>\n </div>\n </div>\n <div class="panel panel-default">\n <div class="panel-heading">\n <h4 class="panel-title">\n <a data-toggle="collapse" data-parent="#accordion" href="#collapseEight">\n <span class="glyphicon glyphicon-user"></span>   Users \n </a>\n </h4>\n </div>\n <div id="collapseEight" class="panel-collapse collapse">\n <div class="panel-body">\n <a href="/account/users/"><button type="button" class="btn btn-default btn-block">View All Users</button></a>\n <a href="/account/edit_user__new/"><button type="button" class="btn btn-default btn-block">Create New User</button></a>\n <a href="/account/new_employee_signup/"><button type="button" class="btn btn-default btn-block">Create New Employee</button></a>\n <a href="/account/users__staff/"><button type="button" class="btn btn-default btn-block">View All Employees</button></a> \n </div>\n </div>\n </div>\n <div class="panel panel-default">\n <div class="panel-heading">\n <h4 class="panel-title">\n <a data-toggle="collapse" data-parent="#accordion" href="#collapseNine">\n <span class="glyphicon glyphicon-book"></span>   Accounting \n </a>\n </h4>\n </div>\n <div id="collapseNine" class="panel-collapse collapse">\n <div class="panel-body">\n <a href="/manage/balancesheet/"><button type="button" class="btn btn-default btn-block"> Balance Sheet</button></a>\n <a href="/manage/incomesheet/"><button type="button" class="btn btn-default btn-block"> Income Statement</button></a>\n <a href="/manage/comissions/"><button type="button" class="btn btn-default btn-block"> Comissions Report</button></a>\n <a href="/manage/transactions/"><button type="button" class="btn btn-default btn-block"> Monthly Transactions</button></a>\n <a href="/manage/employee_transactions/"><button type="button" class="btn btn-default btn-block"> Employee Transactions</button></a> \n </div>\n </div>\n </div>\n <div class="panel panel-default">\n <div class="panel-heading">\n <h4 class="panel-title">\n <a data-toggle="collapse" data-parent="#accordion" href="#collapseTen">\n <span class="glyphicon glyphicon-off"></span>   Administration \n </a>\n </h4>\n </div>\n <div id="collapseTen" class="panel-collapse collapse">\n <div class="panel-body">\n <a href="/manage/administration/"><button type="button" class="btn btn-default btn-block">Administration Options</button></a>\n </div>\n </div>\n 
</div>\n </div>\n')
return ''
finally:
context.caller_stack._pop_frame()
|
tangyiyong/odoo
|
refs/heads/8.0
|
addons/account_analytic_default/__openerp__.py
|
264
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Analytic Defaults',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
Set default values for your analytic accounts.
==============================================
Allows analytic accounts to be selected automatically based on criteria:
-------------------------------------------------------------------------
* Product
* Partner
* User
* Company
* Date
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/accounting',
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'security/account_analytic_default_security.xml',
'account_analytic_default_view.xml'
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
uw-it-aca/myuw
|
refs/heads/master
|
myuw/dao/messages.py
|
1
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import csv
import os
import datetime
import bleach
from dateutil.parser import parse
from myuw.models import BannerMessage
from myuw.dao import get_netid_of_current_user, is_using_file_dao
from myuw.dao.admin import is_admin
from myuw.dao.gws import gws
from myuw.dao.term import get_comparison_datetime_with_tz
from myuw.dao.affiliation import get_all_affiliations
from myuw.dao.affiliation_data import get_data_for_affiliations
MESSAGE_ALLOWED_TAGS = bleach.sanitizer.ALLOWED_TAGS + ["span", "h1", "h2",
"h3", "h4"]
MESSAGE_ALLOWED_ATTRIBUTES = bleach.sanitizer.ALLOWED_ATTRIBUTES.copy()
MESSAGE_ALLOWED_ATTRIBUTES["*"] = ["class", "style", "aria-hidden"]
MESSAGE_ALLOWED_STYLES = ["font-size", "color"]
def get_current_messages(request):
current_date = get_comparison_datetime_with_tz(request)
affiliations = get_all_affiliations(request)
messages = get_data_for_affiliations(model=BannerMessage,
affiliations=affiliations,
start__lte=current_date,
end__gte=current_date,
is_published=True)
filtered = []
user_netid = get_netid_of_current_user(request)
for message in messages:
if message.group_id:
if (not is_using_file_dao() and
not gws.is_effective_member(message.group_id, user_netid)):
continue
filtered.append(message)
preview_id = request.GET.get('banner', None)
if preview_id:
try:
banner = BannerMessage.objects.get(preview_id=preview_id)
filtered.append(banner)
except BannerMessage.DoesNotExist:
pass
return filtered
def clean_html(input, additional_tags=None):
tags = MESSAGE_ALLOWED_TAGS[:]
if additional_tags:
tags += additional_tags
return bleach.clean(input, tags=tags,
attributes=MESSAGE_ALLOWED_ATTRIBUTES,
styles=MESSAGE_ALLOWED_STYLES)
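# Editor's illustrative example (not in the original file); the exact output
# depends on the bleach version this module targets.
def _clean_html_example():
    # 'onclick' is not a whitelisted attribute while 'h1' is a whitelisted
    # tag, so only the attribute is dropped.
    return clean_html('<h1 onclick="x()">Hi</h1>')  # -> '<h1>Hi</h1>'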
|
smartlinux/nsxapitest
|
refs/heads/master
|
identity/case01_nsx_identity_login_script.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Test script for identity function.
# While the user is authenticated, API calls succeed; a logged-out user cannot
# call the API. By default the user is authenticated, and all calls go over HTTPS.
#
import sys
import datetime
import libxml2
sys.path.append("..")
import rest
from nsx_basic_input import *
caseName = 'case01_nsx_identity_login'
restclient = rest.Rest(NSX_IP, VC_USER, VC_PWD, True)
def userLogin():
    # Call the user-management enablestate endpoint as the authenticated user
    # and log both the request debug info and the pretty-printed response.
    respData = restclient.put('%s/api/2.0/services/usermgmt/user/%s/enablestate/1'%(NSX_URL, NSX_USER),
                              '', 'userLogin')
    output(restclient.getDebugInfo() + restclient.prettyPrint(respData))
def output(msg):
    # Write the message to a timestamped log file under ../log/.
    f = file(datetime.datetime.now().strftime("../log/" + caseName + "_output_20%y%m%d%H%M%S.log"), "w")
    f.write(msg)
    f.close()
def main():
userLogin()
print "NSX API", caseName, "completed successfully!"
if __name__ == "__main__":
sys.exit(main())
|
Rom4eg/myCompany
|
refs/heads/master
|
myCompany/users/views/auth.py
|
1
|
from django.contrib.auth import authenticate, login, logout
from rest_framework.views import APIView
from rest_framework.renderers import JSONRenderer, TemplateHTMLRenderer
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from users.serializers import ResetPasswordSerializer
from users.serializers import UpdatePasswordSerializer
class Login(APIView):
def post(self, request):
username = request.data['username']
password = request.data['password']
user = authenticate(username=username, password=password)
        if user:
login(request, user)
return Response(status=status.HTTP_200_OK)
return Response(status=status.HTTP_401_UNAUTHORIZED)
class ResetPassword(APIView):
serializer_classes = ResetPasswordSerializer
permission_classes = (AllowAny, )
def put(self, request):
serializer = ResetPasswordSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
serializer.update(serializer, serializer.data)
return Response({}, status=status.HTTP_202_ACCEPTED)
def patch(self, request):
serializer = UpdatePasswordSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
user = serializer.update(serializer, serializer.data)
login(request, user)
return Response({}, status=status.HTTP_200_OK)
|
jimi-c/ansible
|
refs/heads/devel
|
lib/ansible/plugins/filter/ipaddr.py
|
2
|
# (c) 2014, Maciej Delmanowski <drybjed@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from functools import partial
import types
try:
import netaddr
except ImportError:
# in this case, we'll make the filters return error messages (see bottom)
netaddr = None
else:
class mac_linux(netaddr.mac_unix):
pass
mac_linux.word_fmt = '%.2x'
from ansible import errors
# ---- IP address and network query helpers ----
def _empty_ipaddr_query(v, vtype):
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
def _first_last(v):
if v.size == 2:
first_usable = int(netaddr.IPAddress(v.first))
last_usable = int(netaddr.IPAddress(v.last))
return first_usable, last_usable
elif v.size > 1:
first_usable = int(netaddr.IPAddress(v.first + 1))
last_usable = int(netaddr.IPAddress(v.last - 1))
return first_usable, last_usable
def _6to4_query(v, vtype, value):
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
def _ip_query(v):
if v.size == 1:
return str(v.ip)
if v.size > 1:
# /31 networks in netaddr have no broadcast address
if v.ip != v.network or not v.broadcast:
return str(v.ip)
def _gateway_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _address_prefix_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _bool_ipaddr_query(v):
if v:
return True
def _broadcast_query(v):
if v.size > 2:
return str(v.broadcast)
def _cidr_query(v):
return str(v)
def _cidr_lookup_query(v, iplist, value):
try:
if v in iplist:
return value
except:
return False
def _first_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size == 2:
return str(netaddr.IPAddress(int(v.network)))
elif v.size > 1:
return str(netaddr.IPAddress(int(v.network) + 1))
def _host_query(v):
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _hostmask_query(v):
return str(v.hostmask)
def _int_query(v, vtype):
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
def _ip_prefix_query(v):
if v.size == 2:
return str(v.ip) + '/' + str(v.prefixlen)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _ip_netmask_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.netmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.netmask)
'''
def _ip_wildcard_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.hostmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.hostmask)
'''
def _ipv4_query(v, value):
if v.version == 6:
try:
return str(v.ipv4())
except:
return False
else:
return value
def _ipv6_query(v, value):
if v.version == 4:
return str(v.ipv6())
else:
return value
def _last_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
return str(netaddr.IPAddress(last_usable))
def _link_local_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v.version == 4:
if ipaddr(str(v_ip), '169.254.0.0/24'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
def _loopback_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_loopback():
return value
def _multicast_query(v, value):
if v.is_multicast():
return value
def _net_query(v):
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
def _netmask_query(v):
return str(v.netmask)
def _network_query(v):
'''Return the network of a given IP or subnet'''
if v.size > 1:
return str(v.network)
def _network_id_query(v):
'''Return the network of a given IP or subnet'''
return str(v.network)
def _network_netmask_query(v):
return str(v.network) + ' ' + str(v.netmask)
def _network_wildcard_query(v):
return str(v.network) + ' ' + str(v.hostmask)
def _next_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
next_ip = int(netaddr.IPAddress(int(v.ip) + 1))
if next_ip >= first_usable and next_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + 1))
def _prefix_query(v):
return int(v.prefixlen)
def _previous_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
previous_ip = int(netaddr.IPAddress(int(v.ip) - 1))
if previous_ip >= first_usable and previous_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - 1))
def _private_query(v, value):
if v.is_private():
return value
def _public_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if (v_ip.is_unicast() and not v_ip.is_private() and
not v_ip.is_loopback() and not v_ip.is_netmask() and
not v_ip.is_hostmask()):
return value
def _range_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
first_usable = str(netaddr.IPAddress(first_usable))
last_usable = str(netaddr.IPAddress(last_usable))
return "{0}-{1}".format(first_usable, last_usable)
def _revdns_query(v):
v_ip = netaddr.IPAddress(str(v.ip))
return v_ip.reverse_dns
def _size_query(v):
return v.size
def _size_usable_query(v):
if v.size == 1:
return 0
elif v.size == 2:
return 2
return v.size - 2
def _subnet_query(v):
return str(v.cidr)
def _type_query(v):
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
def _unicast_query(v, value):
if v.is_unicast():
return value
def _version_query(v):
return v.version
def _wrap_query(v, vtype, value):
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return value
# ---- HWaddr query helpers ----
def _bare_query(v):
v.dialect = netaddr.mac_bare
return str(v)
def _bool_hwaddr_query(v):
if v:
return True
def _int_hwaddr_query(v):
return int(v)
def _cisco_query(v):
v.dialect = netaddr.mac_cisco
return str(v)
def _empty_hwaddr_query(v, value):
if v:
return value
def _linux_query(v):
v.dialect = mac_linux
return str(v)
def _postgresql_query(v):
v.dialect = netaddr.mac_pgsql
return str(v)
def _unix_query(v):
v.dialect = netaddr.mac_unix
return str(v)
def _win_query(v):
v.dialect = netaddr.mac_eui48
return str(v)
# ---- IP address and network filters ----
# Returns a minified list of subnets or a single subnet that spans all of
# the inputs.
def cidr_merge(value, action='merge'):
if not hasattr(value, '__iter__'):
raise errors.AnsibleFilterError('cidr_merge: expected iterable, got ' + repr(value))
if action == 'merge':
try:
return [str(ip) for ip in netaddr.cidr_merge(value)]
except Exception as e:
raise errors.AnsibleFilterError('cidr_merge: error in netaddr:\n%s' % e)
elif action == 'span':
# spanning_cidr needs at least two values
if len(value) == 0:
return None
elif len(value) == 1:
try:
return str(netaddr.IPNetwork(value[0]))
except Exception as e:
raise errors.AnsibleFilterError('cidr_merge: error in netaddr:\n%s' % e)
else:
try:
return str(netaddr.spanning_cidr(value))
except Exception as e:
raise errors.AnsibleFilterError('cidr_merge: error in netaddr:\n%s' % e)
else:
raise errors.AnsibleFilterError("cidr_merge: invalid action '%s'" % action)
def ipaddr(value, query='', version=False, alias='ipaddr'):
''' Check if string is an IP address or network and filter it '''
query_func_extra_args = {
'': ('vtype',),
'6to4': ('vtype', 'value'),
'cidr_lookup': ('iplist', 'value'),
'first_usable': ('vtype',),
'int': ('vtype',),
'ipv4': ('value',),
'ipv6': ('value',),
'last_usable': ('vtype',),
'link-local': ('value',),
'loopback': ('value',),
'lo': ('value',),
'multicast': ('value',),
'next_usable': ('vtype',),
'previous_usable': ('vtype',),
'private': ('value',),
'public': ('value',),
'unicast': ('value',),
'range_usable': ('vtype',),
'wrap': ('vtype', 'value'),
}
query_func_map = {
'': _empty_ipaddr_query,
'6to4': _6to4_query,
'address': _ip_query,
'address/prefix': _address_prefix_query, # deprecate
'bool': _bool_ipaddr_query,
'broadcast': _broadcast_query,
'cidr': _cidr_query,
'cidr_lookup': _cidr_lookup_query,
'first_usable': _first_usable_query,
'gateway': _gateway_query, # deprecate
'gw': _gateway_query, # deprecate
'host': _host_query,
'host/prefix': _address_prefix_query, # deprecate
'hostmask': _hostmask_query,
'hostnet': _gateway_query, # deprecate
'int': _int_query,
'ip': _ip_query,
'ip/prefix': _ip_prefix_query,
'ip_netmask': _ip_netmask_query,
# 'ip_wildcard': _ip_wildcard_query, built then could not think of use case
'ipv4': _ipv4_query,
'ipv6': _ipv6_query,
'last_usable': _last_usable_query,
'link-local': _link_local_query,
'lo': _loopback_query,
'loopback': _loopback_query,
'multicast': _multicast_query,
'net': _net_query,
'next_usable': _next_usable_query,
'netmask': _netmask_query,
'network': _network_query,
'network_id': _network_id_query,
'network/prefix': _subnet_query,
'network_netmask': _network_netmask_query,
'network_wildcard': _network_wildcard_query,
'prefix': _prefix_query,
'previous_usable': _previous_usable_query,
'private': _private_query,
'public': _public_query,
'range_usable': _range_usable_query,
'revdns': _revdns_query,
'router': _gateway_query, # deprecate
'size': _size_query,
'size_usable': _size_usable_query,
'subnet': _subnet_query,
'type': _type_query,
'unicast': _unicast_query,
'v4': _ipv4_query,
'v6': _ipv6_query,
'version': _version_query,
'wildcard': _hostmask_query,
'wrap': _wrap_query,
}
vtype = None
if not value:
return False
elif value is True:
return False
# Check if value is a list and parse each element
elif isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, str(query), version):
_ret.append(ipaddr(element, str(query), version))
if _ret:
return _ret
else:
return list()
# Check if value is a number and convert it to an IP address
elif str(value).isdigit():
# We don't know what IP version to assume, so let's check IPv4 first,
# then IPv6
try:
if ((not version) or (version and version == 4)):
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = int(value)
v.prefixlen = 32
elif version and version == 6:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# IPv4 didn't work the first time, so it definitely has to be IPv6
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# The value is too big for IPv6. Are you a nanobot?
except:
return False
# We got an IP address, let's mark it as such
value = str(v)
vtype = 'address'
# value has not been recognized, check if it's a valid IP string
else:
try:
v = netaddr.IPNetwork(value)
# value is a valid IP string, check if user specified
# CIDR prefix or just an IP address, this will indicate default
# output format
try:
address, prefix = value.split('/')
vtype = 'network'
except:
vtype = 'address'
# value hasn't been recognized, maybe it's a numerical CIDR?
except:
try:
address, prefix = value.split('/')
address.isdigit()
address = int(address)
prefix.isdigit()
prefix = int(prefix)
# It's not numerical CIDR, give up
except:
return False
# It is something, so let's try and build a CIDR from the parts
try:
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv4 CIDR
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv6 CIDR. Give up.
except:
return False
# We have a valid CIDR, so let's write it in correct format
value = str(v)
vtype = 'network'
# We have a query string but it's not in the known query types. Check if
# that string is a valid subnet, if so, we can check later if given IP
# address/network is inside that specific subnet
try:
# ?? 6to4 and link-local were True here before. Should they still?
if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
query = 'cidr_lookup'
except:
pass
# This code checks if value matches the IP version the user wants, ie. if
# it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
# If version does not match, return False
if version and v.version != version:
return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
try:
float(query)
if v.size == 1:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif v.size > 1:
try:
return str(v[query]) + '/' + str(v.prefixlen)
except:
return False
else:
return value
except:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
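# Editor's illustrative examples (not in the original file): a few common
# ipaddr() queries and their expected results, assuming netaddr is installed.
def _ipaddr_examples():
    assert ipaddr('192.168.0.1') == '192.168.0.1'  # plain address passes through
    assert ipaddr('192.168.0.1/24', 'network') == '192.168.0.0'
    assert ipaddr('192.168.0.1/24', 'netmask') == '255.255.255.0'
    assert ipaddr('192.168.0.1/24', 'prefix') == 24
    assert ipaddr('foo') is False  # invalid input yields False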
def ipmath(value, amount):
try:
ip = netaddr.IPAddress(value)
except netaddr.AddrFormatError:
msg = 'You must pass a valid IP address; {0} is invalid'.format(value)
raise errors.AnsibleFilterError(msg)
if not isinstance(amount, int):
msg = (
'You must pass an integer for arithmetic; '
'{0} is not a valid integer'
).format(amount)
raise errors.AnsibleFilterError(msg)
return str(ip + amount)
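# Editor's illustrative examples (not in the original file): ipmath performs
# simple offset arithmetic and works for both IPv4 and IPv6.
def _ipmath_examples():
    assert ipmath('192.168.1.5', 5) == '192.168.1.10'
    assert ipmath('192.168.1.5', -10) == '192.168.0.251'
    assert ipmath('2001:db8::1', 8) == '2001:db8::9'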
def ipwrap(value, query=''):
try:
if isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, query, version=False, alias='ipwrap'):
_ret.append(ipaddr(element, 'wrap'))
else:
_ret.append(element)
return _ret
else:
_ret = ipaddr(value, query, version=False, alias='ipwrap')
if _ret:
return ipaddr(_ret, 'wrap')
else:
return value
except:
return value
def ipv4(value, query=''):
return ipaddr(value, query, version=4, alias='ipv4')
def ipv6(value, query=''):
return ipaddr(value, query, version=6, alias='ipv6')
# Split given subnet into smaller subnets or find out the biggest subnet of
# a given IP address with given CIDR prefix
# Usage:
#
# - address or address/prefix | ipsubnet
# returns CIDR subnet of a given input
#
# - address/prefix | ipsubnet(cidr)
# returns number of possible subnets for given CIDR prefix
#
# - address/prefix | ipsubnet(cidr, index)
# returns new subnet with given CIDR prefix
#
# - address | ipsubnet(cidr)
# returns biggest subnet with given CIDR prefix that address belongs to
#
# - address | ipsubnet(cidr, index)
# returns next indexed subnet which contains given address
def ipsubnet(value, query='', index='x'):
''' Manipulate IPv4/IPv6 subnets '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return str(value)
elif str(query).isdigit():
vsize = ipaddr(v, 'size')
query = int(query)
try:
float(index)
index = int(index)
if vsize > 1:
try:
return str(list(value.subnet(query))[index])
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[index])
except:
return False
except:
if vsize > 1:
try:
return str(len(list(value.subnet(query))))
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[0])
except:
return False
return False
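# Editor's illustrative examples (not in the original file) matching the usage
# notes above; note that subnet counts come back as strings.
def _ipsubnet_examples():
    assert ipsubnet('192.168.0.0/16', 20) == '16'  # number of possible /20 subnets
    assert ipsubnet('192.168.0.0/16', 20, 0) == '192.168.0.0/20'
    assert ipsubnet('192.168.144.5', 20) == '192.168.144.0/20'  # enclosing /20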
# Returns the nth host within a network described by value.
# Usage:
#
# - address or address/prefix | nthhost(nth)
# returns the nth host within the given network
def nthhost(value, query=''):
''' Get the nth host within a given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
nth = int(query)
if value.size > nth:
return value[nth]
except ValueError:
return False
return False
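# Editor's illustrative example (not in the original file): nthhost returns a
# netaddr IPAddress, so it is stringified here.
def _nthhost_example():
    assert str(nthhost('10.0.0.0/8', 305)) == '10.0.1.49'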
# Returns the next nth usable ip within a network described by value.
def next_nth_usable(value, offset):
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
v = netaddr.IPNetwork(v)
except:
return False
if type(offset) != int:
raise errors.AnsibleFilterError('Must pass in an integer')
if v.size > 1:
first_usable, last_usable = _first_last(v)
nth_ip = int(netaddr.IPAddress(int(v.ip) + offset))
if nth_ip >= first_usable and nth_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + offset))
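# Editor's illustrative example (not in the original file):
def _next_nth_usable_example():
    assert next_nth_usable('192.168.122.1/24', 2) == '192.168.122.3'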
# Returns the previous nth usable ip within a network described by value.
def previous_nth_usable(value, offset):
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
v = netaddr.IPNetwork(v)
except:
return False
if type(offset) != int:
raise errors.AnsibleFilterError('Must pass in an integer')
if v.size > 1:
first_usable, last_usable = _first_last(v)
nth_ip = int(netaddr.IPAddress(int(v.ip) - offset))
if nth_ip >= first_usable and nth_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - offset))
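# Editor's illustrative example (not in the original file):
def _previous_nth_usable_example():
    assert previous_nth_usable('192.168.122.10/24', 2) == '192.168.122.8'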
def _range_checker(ip_check, first, last):
'''
Tests whether an ip address is within the bounds of the first and last address.
:param ip_check: The ip to test if it is within first and last.
:param first: The first IP in the range to test against.
:param last: The last IP in the range to test against.
:return: bool
'''
if ip_check >= first and ip_check <= last:
return True
else:
return False
def _address_normalizer(value):
'''
Used to validate an address or network type and return it in a consistent format.
    It also anticipates future use cases, such as address ranges, that are not currently supported.
:param value: The string representation of an address or network.
:return: The address or network in the normalized form.
'''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address' or vtype == "network":
v = ipaddr(value, 'subnet')
except:
return False
return v
def network_in_usable(value, test):
'''
    Checks whether 'test' is a usable address (or addresses) within 'value'
    :param value: The string representation of an address or network to test against.
:param test: The string representation of an address or network to validate if it is within the range of 'value'.
:return: bool
'''
# normalize value and test variables into an ipaddr
v = _address_normalizer(value)
w = _address_normalizer(test)
    # get first and last addresses as integers to compare value and test; or catches value when case is /32
v_first = ipaddr(ipaddr(v, 'first_usable') or ipaddr(v, 'address'), 'int')
v_last = ipaddr(ipaddr(v, 'last_usable') or ipaddr(v, 'address'), 'int')
w_first = ipaddr(ipaddr(w, 'network') or ipaddr(w, 'address'), 'int')
w_last = ipaddr(ipaddr(w, 'broadcast') or ipaddr(w, 'address'), 'int')
if _range_checker(w_first, v_first, v_last) and _range_checker(w_last, v_first, v_last):
return True
else:
return False
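# Editor's illustrative examples (not in the original file): the usable range
# of a /24 excludes its network and broadcast addresses.
def _network_in_usable_examples():
    assert network_in_usable('192.168.0.0/24', '192.168.0.1') is True
    assert network_in_usable('192.168.0.0/24', '192.168.0.255') is False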
def network_in_network(value, test):
'''
Checks whether the 'test' address or addresses are in 'value', including broadcast and network
    :param value: The network address or range to test against.
:param test: The address or network to validate if it is within the range of 'value'.
:return: bool
'''
# normalize value and test variables into an ipaddr
v = _address_normalizer(value)
w = _address_normalizer(test)
    # get first and last addresses as integers to compare value and test; or catches value when case is /32
v_first = ipaddr(ipaddr(v, 'network') or ipaddr(v, 'address'), 'int')
v_last = ipaddr(ipaddr(v, 'broadcast') or ipaddr(v, 'address'), 'int')
w_first = ipaddr(ipaddr(w, 'network') or ipaddr(w, 'address'), 'int')
w_last = ipaddr(ipaddr(w, 'broadcast') or ipaddr(w, 'address'), 'int')
if _range_checker(w_first, v_first, v_last) and _range_checker(w_last, v_first, v_last):
return True
else:
return False
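# Editor's illustrative examples (not in the original file): unlike
# network_in_usable, the network and broadcast addresses count as contained.
def _network_in_network_examples():
    assert network_in_network('192.168.0.0/24', '192.168.0.255') is True
    assert network_in_network('192.168.0.0/24', '10.0.0.1') is False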
def reduce_on_network(value, network):
'''
Reduces a list of addresses to only the addresses that match a given network.
    :param value: The list of addresses to filter on.
    :param network: The network to validate against.
:return: The reduced list of addresses.
'''
# normalize network variable into an ipaddr
n = _address_normalizer(network)
    # get first and last addresses as integers to compare value and test; or catches value when case is /32
n_first = ipaddr(ipaddr(n, 'network') or ipaddr(n, 'address'), 'int')
n_last = ipaddr(ipaddr(n, 'broadcast') or ipaddr(n, 'address'), 'int')
# create an empty list to fill and return
r = []
for address in value:
# normalize address variables into an ipaddr
a = _address_normalizer(address)
        # get first and last addresses as integers to compare value and test; or catches value when case is /32
a_first = ipaddr(ipaddr(a, 'network') or ipaddr(a, 'address'), 'int')
a_last = ipaddr(ipaddr(a, 'broadcast') or ipaddr(a, 'address'), 'int')
if _range_checker(a_first, n_first, n_last) and _range_checker(a_last, n_first, n_last):
r.append(address)
return r
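# Editor's illustrative example (not in the original file):
def _reduce_on_network_example():
    hosts = ['192.168.0.34', '10.3.0.3', '192.168.2.34']
    assert reduce_on_network(hosts, '192.168.0.0/24') == ['192.168.0.34']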
# Returns the SLAAC address within a network for a given HW/MAC address.
# Usage:
#
# - prefix | slaac(mac)
def slaac(value, query=''):
''' Get the SLAAC address within given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
if ipaddr(value, 'version') != 6:
return False
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
mac = hwaddr(query, alias='slaac')
eui = netaddr.EUI(mac)
except:
return False
return eui.ipv6(value.network)
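# Editor's illustrative example (not in the original file): the MAC is folded
# into the /64 prefix via EUI-64 (ff:fe inserted, universal/local bit flipped).
def _slaac_example():
    assert str(slaac('fdcf:1894:23b5:d38c::/64', 'c2:31:b3:83:bf:2b')) == \
        'fdcf:1894:23b5:d38c:c031:b3ff:fe83:bf2b'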
# ---- HWaddr / MAC address filters ----
def hwaddr(value, query='', alias='hwaddr'):
''' Check if string is a HW/MAC address and filter it '''
query_func_extra_args = {
'': ('value',),
}
query_func_map = {
'': _empty_hwaddr_query,
'bare': _bare_query,
'bool': _bool_hwaddr_query,
'int': _int_hwaddr_query,
'cisco': _cisco_query,
'eui48': _win_query,
'linux': _linux_query,
'pgsql': _postgresql_query,
'postgresql': _postgresql_query,
'psql': _postgresql_query,
'unix': _unix_query,
'win': _win_query,
}
    try:
        v = netaddr.EUI(value)
    except:
        if query and query != 'bool':
            raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
        # not a hardware address and no error requested, so nothing to query
        return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
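# Editor's illustrative examples (not in the original file): the same MAC
# rendered in a few of the supported dialects.
def _hwaddr_examples():
    assert hwaddr('08:4f:a9:7e:80:3a', 'cisco') == '084f.a97e.803a'
    assert hwaddr('08:4f:a9:7e:80:3a', 'bare') == '084FA97E803A'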
def macaddr(value, query=''):
return hwaddr(value, query, alias='macaddr')
def _need_netaddr(f_name, *args, **kwargs):
raise errors.AnsibleFilterError('The %s filter requires python-netaddr be '
'installed on the ansible controller' % f_name)
def ip4_hex(arg, delimiter=''):
''' Convert an IPv4 address to Hexadecimal notation '''
numbers = list(map(int, arg.split('.')))
return '{0:02x}{sep}{1:02x}{sep}{2:02x}{sep}{3:02x}'.format(*numbers, sep=delimiter)
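# Editor's illustrative examples (not in the original file):
def _ip4_hex_examples():
    assert ip4_hex('192.168.0.1') == 'c0a80001'
    assert ip4_hex('192.168.0.1', ':') == 'c0:a8:00:01'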
# ---- Ansible filters ----
class FilterModule(object):
''' IP address and network manipulation filters '''
filter_map = {
# IP addresses and networks
'cidr_merge': cidr_merge,
'ipaddr': ipaddr,
'ipmath': ipmath,
'ipwrap': ipwrap,
'ip4_hex': ip4_hex,
'ipv4': ipv4,
'ipv6': ipv6,
'ipsubnet': ipsubnet,
'next_nth_usable': next_nth_usable,
'network_in_network': network_in_network,
'network_in_usable': network_in_usable,
'reduce_on_network': reduce_on_network,
'nthhost': nthhost,
'previous_nth_usable': previous_nth_usable,
'slaac': slaac,
# MAC / HW addresses
'hwaddr': hwaddr,
'macaddr': macaddr
}
def filters(self):
if netaddr:
return self.filter_map
else:
# Need to install python-netaddr for these filters to work
return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
|
drummonds/remittance
|
refs/heads/master
|
tests/__init__.py
|
1
|
# -*- coding: utf-8 -*-
"""Unit test package for remittance."""
|
kreatorkodi/repository.torrentbr
|
refs/heads/master
|
plugin.video.elementum/resources/site-packages/bjsonrpc/varsync/__init__.py
|
11
|
# encoding: UTF-8
"""
Variables synchronized over json-rpc at both ends.
This package has several datatypes to ease variable-sharing across peers.
Proposed Utils:
* FIFOBuffer: streams lists of bytes/numbers/etc over network.
Proposed Types:
* FloatSync: floating point number synced over network
- sync_mode: fixed master-slave, dynamic master-slave,
queued master-master
- get, set(value)
- apply(operator, value) -- sum 3, mult 1.5, set 6 ...
- addcallback(fn), removecallback(fn)
* TupleSync: Tuple synced over network
- sync_mode: fixed master-slave, dynamic master-slave,
queued master-master
- get, set(value)
- addcallback(fn), removecallback(fn)
* StringSync: (small) String synced over network
- sync_mode: fixed master-slave, dynamic master-slave,
queued master-master
- get, set(value)
- addcallback(fn), removecallback(fn)
* DictSync: dict of elements synced over network
- sync_mode: fixed master-slave, dynamic master-slave,
queued master-master
- get(x), set(x,value)
- apply(x, operator, value)
- addcallback(fn), removecallback(fn)
* ListSync: list of elements synced over network.
- sync_mode: fixed master-slave, dynamic master-slave,
queued master-master
- primary_key: index, full-item, dict-column, list-index
- item_style: single-object, list, dict
- clear
- append(x),push(x)
- remove(x),pop(x)
- update(x,value)
- addcallback(fn), removecallback(fn)
"""
|
fitermay/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/hgext/convert/darcs.py
|
94
|
# darcs.py - darcs support for the convert extension
#
# Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from common import NoRepo, checktool, commandline, commit, converter_source
from mercurial.i18n import _
from mercurial import util
import os, shutil, tempfile, re
# The naming drift of ElementTree is fun!
try:
from xml.etree.cElementTree import ElementTree, XMLParser
except ImportError:
try:
from xml.etree.ElementTree import ElementTree, XMLParser
except ImportError:
try:
from elementtree.cElementTree import ElementTree, XMLParser
except ImportError:
try:
from elementtree.ElementTree import ElementTree, XMLParser
except ImportError:
pass
class darcs_source(converter_source, commandline):
def __init__(self, ui, path, rev=None):
converter_source.__init__(self, ui, path, rev=rev)
commandline.__init__(self, ui, 'darcs')
# check for _darcs, ElementTree so that we can easily skip
# test-convert-darcs if ElementTree is not around
if not os.path.exists(os.path.join(path, '_darcs')):
raise NoRepo(_("%s does not look like a darcs repository") % path)
checktool('darcs')
version = self.run0('--version').splitlines()[0].strip()
if version < '2.1':
raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') %
version)
if "ElementTree" not in globals():
raise util.Abort(_("Python ElementTree module is not available"))
self.path = os.path.realpath(path)
self.lastrev = None
self.changes = {}
self.parents = {}
self.tags = {}
# Check darcs repository format
format = self.format()
if format:
if format in ('darcs-1.0', 'hashed'):
raise NoRepo(_("%s repository format is unsupported, "
"please upgrade") % format)
else:
self.ui.warn(_('failed to detect repository format!'))
def before(self):
self.tmppath = tempfile.mkdtemp(
prefix='convert-' + os.path.basename(self.path) + '-')
output, status = self.run('init', repodir=self.tmppath)
self.checkexit(status)
tree = self.xml('changes', xml_output=True, summary=True,
repodir=self.path)
tagname = None
child = None
for elt in tree.findall('patch'):
node = elt.get('hash')
name = elt.findtext('name', '')
if name.startswith('TAG '):
tagname = name[4:].strip()
elif tagname is not None:
self.tags[tagname] = node
tagname = None
self.changes[node] = elt
self.parents[child] = [node]
child = node
self.parents[child] = []
def after(self):
self.ui.debug('cleaning up %s\n' % self.tmppath)
shutil.rmtree(self.tmppath, ignore_errors=True)
def recode(self, s, encoding=None):
if isinstance(s, unicode):
# XMLParser returns unicode objects for anything it can't
# encode into ASCII. We convert them back to str to get
# recode's normal conversion behavior.
s = s.encode('latin-1')
return super(darcs_source, self).recode(s, encoding)
def xml(self, cmd, **kwargs):
# NOTE: darcs is currently encoding agnostic and will print
# patch metadata byte-for-byte, even in the XML changelog.
etree = ElementTree()
# While we are decoding the XML as latin-1 to be as liberal as
# possible, etree will still raise an exception if any
# non-printable characters are in the XML changelog.
parser = XMLParser(encoding='latin-1')
p = self._run(cmd, **kwargs)
etree.parse(p.stdout, parser=parser)
p.wait()
self.checkexit(p.returncode)
return etree.getroot()
def format(self):
output, status = self.run('show', 'repo', no_files=True,
repodir=self.path)
self.checkexit(status)
m = re.search(r'^\s*Format:\s*(.*)$', output, re.MULTILINE)
if not m:
return None
return ','.join(sorted(f.strip() for f in m.group(1).split(',')))
def manifest(self):
man = []
output, status = self.run('show', 'files', no_directories=True,
repodir=self.tmppath)
self.checkexit(status)
for line in output.split('\n'):
path = line[2:]
if path:
man.append(path)
return man
def getheads(self):
return self.parents[None]
def getcommit(self, rev):
elt = self.changes[rev]
date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
# etree can return unicode objects for name, comment, and author,
# so recode() is used to ensure str objects are emitted.
return commit(author=self.recode(elt.get('author')),
date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
desc=self.recode(desc).strip(),
parents=self.parents[rev])
def pull(self, rev):
output, status = self.run('pull', self.path, all=True,
match='hash %s' % rev,
no_test=True, no_posthook=True,
external_merge='/bin/false',
repodir=self.tmppath)
if status:
if output.find('We have conflicts in') == -1:
self.checkexit(status, output)
output, status = self.run('revert', all=True, repodir=self.tmppath)
self.checkexit(status, output)
def getchanges(self, rev):
copies = {}
changes = []
man = None
for elt in self.changes[rev].find('summary').getchildren():
if elt.tag in ('add_directory', 'remove_directory'):
continue
if elt.tag == 'move':
if man is None:
man = self.manifest()
source, dest = elt.get('from'), elt.get('to')
if source in man:
# File move
changes.append((source, rev))
changes.append((dest, rev))
copies[dest] = source
else:
# Directory move, deduce file moves from manifest
source = source + '/'
for f in man:
if not f.startswith(source):
continue
fdest = dest + '/' + f[len(source):]
changes.append((f, rev))
changes.append((fdest, rev))
copies[fdest] = f
else:
changes.append((elt.text.strip(), rev))
self.pull(rev)
self.lastrev = rev
return sorted(changes), copies
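# For example, a darcs directory move of "src" to "lib" with manifest entries
# "src/a.py" and "src/b.py" yields changes for all four paths and
# copies == {'lib/a.py': 'src/a.py', 'lib/b.py': 'src/b.py'}.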
def getfile(self, name, rev):
if rev != self.lastrev:
raise util.Abort(_('internal calling inconsistency'))
path = os.path.join(self.tmppath, name)
data = util.readfile(path)
mode = os.lstat(path).st_mode
mode = (mode & 0111) and 'x' or ''
return data, mode
def gettags(self):
return self.tags
|
hip-odoo/odoo
|
refs/heads/10.0
|
addons/website_mail/models/__init__.py
|
24
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import mail_message
import update
|
swegener/micropython
|
refs/heads/master
|
tests/float/python36.py
|
6
|
# tests for things that only Python 3.6 supports, needing floats
# underscores in numeric literals
print(1_000.1_8)
print('%.2g' % 1e1_2)
# underscore supported by int/float constructors
print(float('1_2_3'))
print(float('1_2_3.4'))
print('%.2g' % float('1e1_3'))
|
patrioticcow/MessagesForSkype
|
refs/heads/master
|
packages/win32/bundle/MessagesForSkype/modules/python/1.3.1-beta/Lib/pdb.py
|
90
|
#! /usr/bin/env python
"""A Python debugger."""
# (See pdb.doc for documentation.)
import sys
import linecache
import cmd
import bdb
from repr import Repr
import os
import re
import pprint
import traceback
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
_repr = Repr()
_repr.maxstring = 200
_saferepr = _repr.repr
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % funcname)
try:
fp = open(filename)
except IOError:
return None
# consumer of this info expects the first line to be 1
lineno = 1
answer = None
while 1:
line = fp.readline()
if line == '':
break
if cre.match(line):
answer = funcname, filename, lineno
break
lineno = lineno + 1
fp.close()
return answer
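# Illustrative behavior (hypothetical file): find_function('main', 'demo.py')
# returns ('main', 'demo.py', 12) when the first line matching "def main(" is
# line 12, and None when the file is unreadable or no match is found.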
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None):
bdb.Bdb.__init__(self)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = 0
# Try to load readline if it exists
try:
import readline
except ImportError:
pass
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
rcFile = open(os.path.join(envHome, ".pdbrc"))
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
try:
rcFile = open(".pdbrc")
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
self.commands = {} # associates a command list to breakpoint numbers
self.commands_doprompt = {} # for each bp num, tells if the prompt must be disp. after execing the cmd list
self.commands_silent = {} # for each bp num, tells if the stack trace must be disp. after execing the cmd list
self.commands_defining = False # True while in the process of defining a command list
self.commands_bnum = None # The breakpoint number for which we are defining a list
def reset(self):
bdb.Bdb.reset(self)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
def setup(self, f, t):
self.forget()
self.stack, self.curindex = self.get_stack(f, t)
self.curframe = self.stack[self.curindex][0]
self.execRcLines()
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
if self.rcLines:
# Make local copy because of recursion
rcLines = self.rcLines
# executed only once
self.rcLines = []
for line in rcLines:
line = line[:-1]
if len(line) > 0 and line[0] != '#':
self.onecmd(line)
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
print >>self.stdout, '--Call--'
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = 0
if self.bp_commands(frame):
self.interaction(frame, None)
def bp_commands(self,frame):
""" Call every command that was set for the current active breakpoint (if there is one)
Returns True if the normal interaction function must be called, False otherwise """
#self.currentbp is set in bdb.py in bdb.break_here if a breakpoint was hit
if getattr(self,"currentbp",False) and self.currentbp in self.commands:
currentbp = self.currentbp
self.currentbp = 0
lastcmd_back = self.lastcmd
self.setup(frame, None)
for line in self.commands[currentbp]:
self.onecmd(line)
self.lastcmd = lastcmd_back
if not self.commands_silent[currentbp]:
self.print_stack_entry(self.stack[self.curindex])
if self.commands_doprompt[currentbp]:
self.cmdloop()
self.forget()
return
return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
frame.f_locals['__return__'] = return_value
print >>self.stdout, '--Return--'
self.interaction(frame, None)
def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
else: exc_type_name = exc_type.__name__
print >>self.stdout, exc_type_name + ':', _saferepr(exc_value)
self.interaction(frame, exc_traceback)
# General interaction function
def interaction(self, frame, traceback):
self.setup(frame, traceback)
self.print_stack_entry(self.stack[self.curindex])
self.cmdloop()
self.forget()
def default(self, line):
if line[:1] == '!': line = line[1:]
locals = self.curframe.f_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
exec code in globals, locals
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', v
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii = ii + 1
line = line.replace("%*", ' '.join(args[1:]))
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
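# For example, with self.aliases == {'pi': 'p %1.__dict__'}, the input line
# "pi obj" expands to "p obj.__dict__" before dispatch, and "p x ;; p y"
# queues "p y" and returns "p x".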
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
def handle_command_def(self,line):
""" Handles one command line during command list definition. """
cmd, arg, line = self.parseline(line)
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
if (arg):
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
func = self.default
if func.func_name in self.commands_resuming: # one of the resuming commands.
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
do_h = cmd.Cmd.do_help
def do_commands(self, arg):
"""Defines a list of commands associated to a breakpoint
Those commands will be executed whenever the breakpoint causes the program to stop execution."""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber)-1
else:
try:
bnum = int(arg)
except:
print >>self.stdout, "Usage : commands [bnum]\n ...\n end"
return
self.commands_bnum = bnum
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
self.cmdloop()
self.commands_defining = False
self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
if self.breaks: # There's at least one
print >>self.stdout, "Num Type Disp Enb Where"
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint(self.stdout)
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
print >>self.stdout, '*** ', repr(filename),
print >>self.stdout, 'not found from sys.path'
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError, msg:
print >>self.stdout, '*** Bad lineno:', arg
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe.f_locals)
except:
func = arg
try:
if hasattr(func, 'im_func'):
func = func.im_func
code = func.func_code
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
print >>self.stdout, '*** The specified object',
print >>self.stdout, repr(arg),
print >>self.stdout, 'is not a function'
print >>self.stdout, 'or was not found along sys.path.'
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err: print >>self.stdout, '***', err
else:
bp = self.get_breaks(filename, line)[-1]
print >>self.stdout, "Breakpoint %d at %s:%d" % (bp.number,
bp.file,
bp.line)
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
self.do_break(arg, 1)
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
line = linecache.getline(filename, lineno)
if not line:
print >>self.stdout, 'End of file'
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
print >>self.stdout, '*** Blank or comment'
return 0
return lineno
def do_enable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
def do_disable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
def do_condition(self, arg):
# arg is breakpoint number and condition
args = arg.split(' ', 1)
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
cond = args[1]
except:
cond = None
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.cond = cond
if not cond:
print >>self.stdout, 'Breakpoint', bpnum,
print >>self.stdout, 'is now unconditional.'
def do_ignore(self,arg):
"""arg is bp number followed by ignore count."""
args = arg.split()
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
print >>self.stdout, reply + ' of breakpoint %d.' % bpnum
else:
print >>self.stdout, 'Will stop next time breakpoint',
print >>self.stdout, bpnum, 'is reached.'
def do_clear(self, arg):
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = raw_input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
self.clear_all_breaks()
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
err = self.clear_break(filename, lineno)
if err: print >>self.stdout, '***', err
return
numberlist = arg.split()
for i in numberlist:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
err = self.clear_bpbynumber(i)
if err:
print >>self.stdout, '***', err
else:
print >>self.stdout, 'Deleted breakpoint', i
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def do_up(self, arg):
if self.curindex == 0:
print >>self.stdout, '*** Oldest frame'
else:
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_u = do_up
def do_down(self, arg):
if self.curindex + 1 == len(self.stack):
print >>self.stdout, '*** Newest frame'
else:
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_d = do_down
def do_step(self, arg):
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
self.set_next(self.curframe)
return 1
do_n = do_next
def do_return(self, arg):
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
if self.curindex + 1 != len(self.stack):
print >>self.stdout, "*** You can only jump within the bottom frame"
return
try:
arg = int(arg)
except ValueError:
print >>self.stdout, "*** The 'jump' command requires a line number."
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError, e:
print >>self.stdout, '*** Jump failed:', e
do_j = do_jump
def do_debug(self, arg):
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe.f_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
print >>self.stdout, "ENTERING RECURSIVE DEBUGGER"
sys.call_tracing(p.run, (arg, globals, locals))
print >>self.stdout, "LEAVING RECURSIVE DEBUGGER"
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_quit(self, arg):
self._user_requested_quit = 1
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
print >>self.stdout
self._user_requested_quit = 1
self.set_quit()
return 1
def do_args(self, arg):
f = self.curframe
co = f.f_code
dict = f.f_locals
n = co.co_argcount
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
print >>self.stdout, name, '=',
if name in dict: print >>self.stdout, dict[name]
else: print >>self.stdout, "*** undefined ***"
do_a = do_args
def do_retval(self, arg):
if '__return__' in self.curframe.f_locals:
print >>self.stdout, self.curframe.f_locals['__return__']
else:
print >>self.stdout, '*** Not yet returned!'
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
raise
def do_p(self, arg):
try:
print >>self.stdout, repr(self._getval(arg))
except:
pass
def do_pp(self, arg):
try:
pprint.pprint(self._getval(arg), self.stdout)
except:
pass
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print >>self.stdout, '*** Error in argument:', repr(arg)
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno)
if not line:
print >>self.stdout, '[EOF]'
break
else:
s = repr(lineno).rjust(3)
if len(s) < 4: s = s + ' '
if lineno in breaklist: s = s + 'B'
else: s = s + ' '
if lineno == self.curframe.f_lineno:
s = s + '->'
print >>self.stdout, s + '\t' + line,
self.lineno = lineno
except KeyboardInterrupt:
pass
do_l = do_list
def do_whatis(self, arg):
try:
value = eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
return
code = None
# Is it a function?
try: code = value.func_code
except: pass
if code:
print >>self.stdout, 'Function', code.co_name
return
# Is it an instance method?
try: code = value.im_func.func_code
except: pass
if code:
print >>self.stdout, 'Method', code.co_name
return
# None of the above...
print >>self.stdout, type(value)
def do_alias(self, arg):
args = arg.split()
if len(args) == 0:
keys = self.aliases.keys()
keys.sort()
for alias in keys:
print >>self.stdout, "%s = %s" % (alias, self.aliases[alias])
return
if args[0] in self.aliases and len(args) == 1:
print >>self.stdout, "%s = %s" % (args[0], self.aliases[args[0]])
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
#list of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
frame, lineno = frame_lineno
if frame is self.curframe:
print >>self.stdout, '>',
else:
print >>self.stdout, ' ',
print >>self.stdout, self.format_stack_entry(frame_lineno,
prompt_prefix)
# Help methods (derived from pdb.doc)
def help_help(self):
self.help_h()
def help_h(self):
print >>self.stdout, """h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command
"help pdb" pipes the full documentation file to the $PAGER
"help exec" gives help on the ! command"""
def help_where(self):
self.help_w()
def help_w(self):
print >>self.stdout, """w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command."""
help_bt = help_w
def help_down(self):
self.help_d()
def help_d(self):
print >>self.stdout, """d(own)
Move the current frame one level down in the stack trace
(to a newer frame)."""
def help_up(self):
self.help_u()
def help_u(self):
print >>self.stdout, """u(p)
Move the current frame one level up in the stack trace
(to an older frame)."""
def help_break(self):
self.help_b()
def help_b(self):
print >>self.stdout, """b(reak) ([file:]lineno | function) [, condition]
With a line number argument, set a break there in the current
file. With a function name, set a break at first executable line
of that function. Without argument, list all breaks. If a second
argument is present, it is a string specifying an expression
which must evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on sys.path;
the .py suffix may be omitted."""
def help_clear(self):
self.help_cl()
def help_cl(self):
print >>self.stdout, "cl(ear) filename:lineno"
print >>self.stdout, """cl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
Note that the argument is different from previous versions of
the debugger (in python distributions 1.5.1 and before) where
a linenumber was used instead of either filename:lineno or
breakpoint numbers."""
def help_tbreak(self):
print >>self.stdout, """tbreak same arguments as break, but breakpoint is
removed when first hit."""
def help_enable(self):
print >>self.stdout, """enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
bp numbers."""
def help_disable(self):
print >>self.stdout, """disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
bp numbers."""
def help_ignore(self):
print >>self.stdout, """ignore bpnumber count
Sets the ignore count for the given breakpoint number. A breakpoint
becomes active when the ignore count is zero. When non-zero, the
count is decremented each time the breakpoint is reached and the
breakpoint is not disabled and any associated condition evaluates
to true."""
def help_condition(self):
print >>self.stdout, """condition bpnumber str_condition
str_condition is a string specifying an expression which
must evaluate to true before the breakpoint is honored.
If str_condition is absent, any existing condition is removed;
i.e., the breakpoint is made unconditional."""
def help_step(self):
self.help_s()
def help_s(self):
print >>self.stdout, """s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function)."""
def help_next(self):
self.help_n()
def help_n(self):
print >>self.stdout, """n(ext)
Continue execution until the next line in the current function
is reached or it returns."""
def help_return(self):
self.help_r()
def help_r(self):
print >>self.stdout, """r(eturn)
Continue execution until the current function returns."""
def help_continue(self):
self.help_c()
def help_cont(self):
self.help_c()
def help_c(self):
print >>self.stdout, """c(ont(inue))
Continue execution, only stop when a breakpoint is encountered."""
def help_jump(self):
self.help_j()
def help_j(self):
print >>self.stdout, """j(ump) lineno
Set the next line that will be executed."""
def help_debug(self):
print >>self.stdout, """debug code
Enter a recursive debugger that steps through the code argument
(which is an arbitrary expression or statement to be executed
in the current environment)."""
def help_list(self):
self.help_l()
def help_l(self):
print >>self.stdout, """l(ist) [first [,last]]
List source code for the current file.
Without arguments, list 11 lines around the current line
or continue the previous listing.
With one argument, list 11 lines starting at that line.
With two arguments, list the given range;
if the second argument is less than the first, it is a count."""
def help_args(self):
self.help_a()
def help_a(self):
print >>self.stdout, """a(rgs)
Print the arguments of the current function."""
def help_p(self):
print >>self.stdout, """p expression
Print the value of the expression."""
def help_pp(self):
print >>self.stdout, """pp expression
Pretty-print the value of the expression."""
def help_exec(self):
print >>self.stdout, """(!) statement
Execute the (one-line) statement in the context of
the current stack frame.
The exclamation point can be omitted unless the first word
of the statement resembles a debugger command.
To assign to a global variable you must always prefix the
command with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)"""
def help_quit(self):
self.help_q()
def help_q(self):
print >>self.stdout, """q(uit) or exit - Quit from the debugger.
The program being executed is aborted."""
help_exit = help_q
def help_whatis(self):
print >>self.stdout, """whatis arg
Prints the type of the argument."""
def help_EOF(self):
print >>self.stdout, """EOF
Handles the receipt of EOF as a command."""
def help_alias(self):
print >>self.stdout, """alias [name [command [parameter parameter ...] ]]
Creates an alias called 'name' that executes 'command'. The command
must *not* be enclosed in quotes. Replaceable parameters are
indicated by %1, %2, and so on, while %* is replaced by all the
parameters. If no command is given, the current alias for name
is shown. If no name is given, all aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is recursively
applied to the first word of the command line; all other words
in the line are left alone.
Some useful aliases (especially when placed in the .pdbrc file) are:
#Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
#Print instance variables in self
alias ps pi self
"""
def help_unalias(self):
print >>self.stdout, """unalias name
Deletes the specified alias."""
def help_commands(self):
print >>self.stdout, """commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber. The
commands themselves appear on the following lines. Type a line
containing just 'end' to terminate the commands.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up again.
Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations) terminates
the command list (as if that command was immediately followed by end).
This is because any time you resume execution
(even with a simple next or step), you may encounter
another breakpoint--which could have its own command list, leading to
ambiguities about which list to execute.
If you use the 'silent' command in the command list, the
usual message about stopping at a breakpoint is not printed. This may
be desirable for breakpoints that are to print a specific message and
then continue. If none of the other commands print anything, you
see no sign that the breakpoint was reached.
"""
def help_pdb(self):
help()
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
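# Illustrative behavior (hypothetical input): for lookupmodule('mymod'), an
# absolute existing path is returned as-is; otherwise the name is checked
# against the main script, '.py' is appended ('mymod.py'), and each sys.path
# entry is searched, returning the first existing match or None.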
def _runscript(self, filename):
# Start with fresh empty copy of globals and locals and tell the script
# that it's being run as __main__ to avoid scripts being able to access
# the pdb.py namespace.
globals_ = {"__name__" : "__main__", "__file__" : filename}
locals_ = globals_
# When bdb sets tracing, a number of call and line events happens
# BEFORE debugger even reaches user's code (and the exact sequence of
# events depends on python version). So we take special measures to
# avoid stopping before we reach the main script (see user_line and
# user_call for details).
self._wait_for_mainpyfile = 1
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = 0
statement = 'execfile( "%s")' % filename
self.run(statement, globals=globals_, locals=locals_)
# Simplified interface
def run(statement, globals=None, locals=None):
Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
# B/W compatibility
run(statement, globals, locals)
def runcall(*args, **kwds):
return Pdb().runcall(*args, **kwds)
def set_trace():
Pdb().set_trace(sys._getframe().f_back)
# Post-Mortem interface
def post_mortem(t):
p = Pdb()
p.reset()
while t.tb_next is not None:
t = t.tb_next
p.interaction(t.tb_frame, t)
def pm():
post_mortem(sys.last_traceback)
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
run(TESTCMD)
# print help
def help():
for dirname in sys.path:
fullname = os.path.join(dirname, 'pdb.doc')
if os.path.exists(fullname):
sts = os.system('${PAGER-more} '+fullname)
if sts: print '*** Pager exit status:', sts
break
else:
print 'Sorry, can\'t find the help file "pdb.doc"',
print 'along the Python search path'
def main():
if not sys.argv[1:]:
print "usage: pdb.py scriptfile [arg] ..."
sys.exit(2)
mainpyfile = sys.argv[1] # Get script filename
if not os.path.exists(mainpyfile):
print 'Error:', mainpyfile, 'does not exist'
sys.exit(1)
del sys.argv[0] # Hide "pdb.py" from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. The best approach would be to
# have a "restart" command which would allow explicit specification of
# command line arguments.
pdb = Pdb()
while 1:
try:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print "The program finished and will be restarted"
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
except:
traceback.print_exc()
print "Uncaught exception. Entering post mortem debugging"
print "Running 'cont' or 'step' will restart the program"
t = sys.exc_info()[2]
while t.tb_next is not None:
t = t.tb_next
pdb.interaction(t.tb_frame,t)
print "Post mortem debugger finished. The "+mainpyfile+" will be restarted"
# When invoked as main program, invoke the debugger on a script
if __name__=='__main__':
main()
|
hlt-mt/tensorflow
|
refs/heads/master
|
tensorflow/python/framework/framework_lib.py
|
6
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=wildcard-import,unused-import,g-bad-import-order,line-too-long
"""Classes and functions for building TensorFlow graphs.
## Core graph data structures
@@Graph
@@Operation
@@Tensor
## Tensor types
@@DType
@@as_dtype
## Utility functions
@@device
@@name_scope
@@control_dependencies
@@convert_to_tensor
@@convert_to_tensor_or_indexed_slices
@@get_default_graph
@@reset_default_graph
@@import_graph_def
@@load_op_library
## Graph collections
@@add_to_collection
@@get_collection
@@GraphKeys
## Defining new operations
@@RegisterGradient
@@NoGradient
@@RegisterShape
@@TensorShape
@@Dimension
@@op_scope
@@get_seed
## For libraries building on TensorFlow
@@register_tensor_conversion_function
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Classes used when building a Graph.
from tensorflow.python.framework.ops import Graph
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.framework.ops import SparseTensor
from tensorflow.python.framework.ops import SparseTensorValue
from tensorflow.python.framework.ops import IndexedSlices
# Utilities used when building a Graph.
from tensorflow.python.framework.ops import device
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.framework.ops import op_scope
from tensorflow.python.framework.ops import control_dependencies
from tensorflow.python.framework.ops import get_default_graph
from tensorflow.python.framework.ops import reset_default_graph
from tensorflow.python.framework.ops import GraphKeys
from tensorflow.python.framework.ops import add_to_collection
from tensorflow.python.framework.ops import get_collection
from tensorflow.python.framework.ops import convert_to_tensor
from tensorflow.python.framework.ops import convert_to_tensor_or_indexed_slices
from tensorflow.python.framework.random_seed import get_seed
from tensorflow.python.framework.random_seed import set_random_seed
from tensorflow.python.framework.importer import import_graph_def
# Needed when you defined a new Op in C++.
from tensorflow.python.framework.ops import RegisterGradient
from tensorflow.python.framework.ops import NoGradient
from tensorflow.python.framework.ops import RegisterShape
from tensorflow.python.framework.tensor_shape import Dimension
from tensorflow.python.framework.tensor_shape import TensorShape
# Needed when interfacing tensorflow to new array libraries
from tensorflow.python.framework.ops import register_tensor_conversion_function
from tensorflow.python.framework.dtypes import *
# Load a TensorFlow plugin
from tensorflow.python.framework.load_library import *
|
ChristinaHammer/Client_Database
|
refs/heads/master
|
cdbfunctions.py
|
1
|
"""cdbfunctions.py
Developer: Noelle Todd
Last Updated: August 30, 2014
This module consists of functions called by the user interface to insert,
delete, update, and otherwise manage data in the database.
This module is still in its early testing stages; many more functions will
be added or edited in the following weeks.
"""
import sqlalchemy
from sqlalchemy import Column, DateTime, String, Integer, ForeignKey, func
from sqlalchemy import desc
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timedelta, date
from cdbtabledef import Household, Person, Volunteer, Visit
class volunteerData:
"""This class is used for inserting/selecting a volunteer into/from
the database.
"""
def __init__(self, firstname, lastname, color, phone=None, active=True):
self.firstname = str(firstname)
self.lastname = str(lastname)
self.color = color
self.phone = str(phone)
self.active = active
class newClientData:
"""This class is used for inserting a new client into the
database.
"""
def __init__(self, firstname, lastname, dob, phone=None,
dateJoined=None):
self.firstname = str(firstname)
self.lastname = str(lastname)
self.dob = dob
self.phone = str(phone)
# Default applied per-instance; datetime.now() as a signature default
# would be evaluated only once, at import time.
self.dateJoined = dateJoined if dateJoined is not None else datetime.now()
class oldClientData:
"""This class is used for updating old clients and for
returning information for a client.
"""
def __init__(self, id, firstname, lastname, dob, phone=None,
dateJoined=None):
self.id = id
self.firstname = str(firstname)
self.lastname = str(lastname)
self.dob = dob
self.age = age(dob)
self.phone = str(phone)
self.dateJoined = dateJoined if dateJoined is not None else datetime.now()
class houseData:
"""This class is used to hold data for inserting a household,
updating a household, or returning household information.
"""
def __init__(self, street, city='Troy', state='NY', zip='12180',
dateVerified=None, apt=None):
self.street = street
self.city = city
self.state = state
self.zip = zip
self.dateVerified = dateVerified
self.apt = apt
class visitData:
"""This class is used to hold data for inserting a visit
"""
def __init__(self, Vol_ID, visitDate=None, notes=None):
self.Vol_ID = Vol_ID
self.visitDate = visitDate if visitDate is not None else datetime.now()
self.notes = notes
class visitDataReturn:
"""This class is used for returning data for the list_visits function.
"""
def __init__(self, visitDate, clientname, volname, notes=None,
vid=None):
self.date = visitDate
self.visitor = clientname
self.volunteer = volname
self.notes = notes
self.visitID = vid
#functions for inserts
def insert_household(s, street, dateverified=None, Apt=None,
City='Troy', State='NY', Zip='12180'):
"""This function creates a new row to hold a household's data. It returns
the household id, which will be used when we insert household members.
"""
newhouse = Household(street_address = street, apt = Apt, city = City,
state = State, zip = Zip,
date_verified = dateverified)
s.add(newhouse)
s.commit()
#return newhouse.id
return newhouse
def insert_person(s, firstname, lastname, dob, newhouse,
datejoined=None, phonenum=None):
"""This function creates a new row to hold an individual's data and
returns the new Person record.
"""
# Default here rather than in the signature so each call gets a fresh
# timestamp (a default of datetime.now() is evaluated only once, at import).
if datejoined is None:
datejoined = datetime.now()
newpers = Person(first_name=firstname, last_name=lastname, DOB=dob,
date_joined=datejoined, phone=phonenum)
newpers.HH_ID = newhouse
newpers.age = age(dob)
s.add(newpers)
s.commit()
#return newpers.id
return newpers
def insert_volunteer(s, firstname, lastname, phonenum=None, active=True, color='light blue'):
"""This function creates a new row in the Volunteer table, to hold
a volunteer's data.
"""
new_vol = Volunteer(first_name=firstname, last_name=lastname,
phone=phonenum, active=active, color=color)
s.add(new_vol)
s.commit()
def insert_visit(s, Vol_id, pers_id, house_id, date_of_visit=None,
notes=None):
"""This function creates a new row in the Visits table to hold
the data for a visit.
"""
if date_of_visit is None:
date_of_visit = datetime.now()
new_visit = Visit(I_ID=pers_id, HH_ID=house_id, Vol_ID=Vol_id,
date=date_of_visit, visit_notes=notes)
s.add(new_visit)
s.commit()
#functions for updating records
def update_household(s, HH_ID, street, city, state, zip, apt=None,
date_verified=None):
"""This function will update a households records
"""
house = s.query(Household).filter(Household.id == HH_ID).one()
house.street_address = street
house.city = city
house.state = state
house.zip = zip
house.apt = apt
house.date_verified = date_verified
s.commit()
def update_person(s, I_ID, firstname, lastname, dob, phonenum=None):
"""This function will update a person's records.
"""
pers = s.query(Person).filter(Person.id == I_ID).one()
pers.first_name = firstname
pers.last_name = lastname
pers.DOB = dob
pers.phone = phonenum
pers.age = age(dob)
s.commit()
def update_visit(s, vis_id, date_of_visit=None,
notes=None):
"""This function will update a visit's record.
"""
if date_of_visit is None:
date_of_visit = datetime.now()
visit = s.query(Visit).filter(Visit.id == vis_id).one()
visit.date = date_of_visit
visit.visit_notes = notes
s.commit()
def update_volunteer(s, vol_id, firstname, lastname, phonenum, active, color):
"""This function will update a volunteer's records.
"""
vol = s.query(Volunteer).filter(Volunteer.id == vol_id).one()
vol.first_name = firstname
vol.last_name = lastname
vol.phone = phonenum
vol.active = active
vol.color = color
s.commit()
#delete functions
def delete_household(s, HH_ID):
"""This function deletes a household record from the database.
"""
house = s.query(Household).filter(Household.id == HH_ID).one()
s.delete(house)
s.commit()
def delete_person(s, I_ID):
"""This function will delete an individual from the database.
"""
pers = s.query(Person).filter(Person.id == I_ID).one()
s.delete(pers)
s.commit()
def delete_volunteer(s, Vol_ID):
"""This function will delete a volunteer if the volunteer has
not participated in a visit. Else, it will "deactivate" the
volunteer.
"""
vol = s.query(Volunteer).filter(Volunteer.id == Vol_ID).one()
s.delete(vol)
s.commit()
def delete_visit(s, Vi_ID):
"""This function will delete a visit from the database.
"""
vis = s.query(Visit).filter(Visit.id == Vi_ID).one()
s.delete(vis)
s.commit()
#helper functions
def age(dob):
"""This function calculates a person's age using the dob input to it.
"""
timey = datetime.now()
if timey.month > dob.month:
return timey.year - dob.year
elif timey.month < dob.month:
return timey.year - dob.year - 1
else:
if timey.day >= dob.day:
return timey.year - dob.year
else:
return timey.year - dob.year - 1
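# Example (illustrative dates): with "now" in August 2014,
# age(datetime(1990, 6, 15)) returns 24, while age(datetime(1990, 12, 1))
# returns 23 because that birthday has not yet occurred this year.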
def list_visits(s, I_ID):
"""This function will find the past visits for a household
and return them as a list of visitDataReturn objects.
"""
visits = []
pers = s.query(Person).filter(Person.id == I_ID).one()
house = s.query(Household).filter(Household.id == pers.HH_ID).one()
#returns all visits for the household in descending order of date
visithistory = s.query(Visit, Person, Volunteer).\
filter(Visit.HH_ID == house.id).\
filter(Visit.I_ID == Person.id).\
filter(Visit.Vol_ID == Volunteer.id).\
order_by(desc(Visit.date)).all()
#package information for each visit and return it in a list.
for instance in visithistory:
clientname = instance.Person.first_name + " " +\
instance.Person.last_name
volname = instance.Volunteer.first_name + " " +\
instance.Volunteer.last_name
visit = visitDataReturn(instance.Visit.date, clientname, volname,
notes=instance.Visit.visit_notes,
vid=instance.Visit.id)
visits.append(visit)
return visits
def get_age_breakdown(members):
"""This function will retrieve all the ages of the members, and return the
number of adults, seniors, children, and infants accordingly.
"""
infants = 0
children = 0
adults = 0
seniors = 0
for member in members:
if member.age < 2:
infants = infants + 1
elif member.age >= 2 and member.age < 18:
children = children + 1
elif member.age >= 18 and member.age < 65:
adults = adults + 1
else:
seniors = seniors + 1
total = infants + children + adults + seniors
agegroups = {'infants':infants, 'children':children, 'adults':adults,
'seniors':seniors, 'total':total}
return agegroups
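# Example (illustrative ages): members aged 1, 5, 30, and 70 produce
# {'infants': 1, 'children': 1, 'adults': 1, 'seniors': 1, 'total': 4}.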
def generate_report(s, duration):
"""This function will generate a csv/excel file that holds all
relevant info for a monthly report.
First name, Last name (of visitor only)
City of household
#of children, seniors, adults, and infants
total number of each of the above
total number of households
total number of people served
"""
import csv
#build a dated filename and open the CSV writer
today = datetime.now()
filename = str(today.month)+ "-" + str(today.day) + "-" + str(today.year) +\
"-report.csv"
csvfile = open(filename, 'w', newline='')
outcsv = csv.writer(csvfile)
#calculate the start of the reporting window (today was computed above)
#duration = timedelta(days=31)
month_ago = today - duration
#convert date objects to strings for comparison purposes
month_ago = str(month_ago)
#one giant massive query
select = sqlalchemy.sql.select([Person.first_name, Person.last_name,
Household.seniors, Household.adults,
Household.children, Household.infants,
Household.city, Visit.date])\
.where(Visit.I_ID == Person.id)\
.where(Visit.HH_ID == Household.id)\
.where(Visit.date >= month_ago)
records = s.execute(select)
outcsv.writerow(records.keys())
outcsv.writerows(records)
csvfile.close()
s.close()
|
vmax-feihu/hue
|
refs/heads/master
|
apps/zookeeper/src/zookeeper/windmilltests.py
|
36
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from desktop.lib.windmill_util import logged_in_client
def test_zookeeper():
""" launches the default view for zookeeper """
client = logged_in_client()
client.click(id='ccs-zookeeper-menu')
client.waits.forElement(classname='CCS-zookeeper', timeout='2000')
|
zakuro9715/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/django/conf/urls/shortcut.py
|
353
|
from django.conf.urls.defaults import *
urlpatterns = patterns('django.views',
(r'^(?P<content_type_id>\d+)/(?P<object_id>.*)/$', 'defaults.shortcut'),
)
|
petrleocompel/gnome15
|
refs/heads/master
|
src/gnome15/g15util.py
|
8
|
# Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2010 Brett Smith <tanktarta@blueyonder.co.uk>
# Copyright (C) 2013 Nuno Araujo <nuno.araujo@russo79.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
This file only exists to keep compatibility with 3rd party plugins.
It has been split into several files.
'''
import util.g15cairo as g15cairo
import util.g15convert as g15convert
import util.g15gconf as g15gconf
import util.g15icontools as g15icontools
import util.g15markup as g15markup
import util.g15os as g15os
import util.g15pythonlang as g15pythonlang
import util.g15scheduler as g15scheduler
import util.g15svg as g15svg
import util.g15uigconf as g15uigconf
import g15notify
import g15driver
def execute_for_output(cmd):
return g15os.get_command_output(cmd)
def run_script(script, args = None, background = True):
return g15os.run_script(script, args, background)
def attr_exists(obj, attr_name):
return g15pythonlang.attr_exists(obj, attr_name)
def call_if_exists(obj, function_name, *args):
    g15pythonlang.call_if_exists(obj, function_name, *args)
def configure_colorchooser_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree, default_alpha = None):
g15uigconf.configure_colorchooser_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree, default_alpha)
def to_cairo_rgba(gconf_client, key, default):
return g15gconf.get_cairo_rgba_or_default(gconf_client, key, default)
def color_changed(widget, gconf_client, key):
g15uigconf.color_changed(widget, gconf_client, key)
def rgb_to_string(rgb):
return g15convert.rgb_to_string(rgb)
def get_alt_color(color):
return g15convert.get_alt_color(color)
def color_to_rgb(color):
return g15convert.color_to_rgb(color)
def to_rgb(string_rgb, default = None):
return g15convert.to_rgb(string_rgb, default)
def to_pixel(rgb):
return g15convert.to_pixel(rgb)
def to_color(rgb):
return g15convert.to_color(rgb)
def spinner_changed(widget, gconf_client, key, model, decimal = False):
g15uigconf.spinner_changed(widget, gconf_client, key, model, decimal)
def configure_spinner_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree, decimal = False):
g15uigconf.configure_spinner_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree, decimal)
def configure_combo_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree):
g15uigconf.configure_combo_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree)
def combo_box_changed(widget, gconf_client, key, model, default_value):
g15uigconf.combo_box_changed(widget, gconf_client, key, model, default_value)
def boolean_conf_value_change(client, connection_id, entry, args):
g15uigconf.boolean_conf_value_change(client, connection_id, entry, args)
def text_conf_value_change(client, connection_id, entry, args):
g15uigconf.text_conf_value_change(client, connection_id, entry, args)
def radio_conf_value_change(client, connection_id, entry, args):
g15uigconf.radio_conf_value_change(client, connection_id, entry, args)
def configure_checkbox_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree, watch_changes = False):
return g15uigconf.configure_checkbox_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree, watch_changes)
def configure_text_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree, watch_changes = False):
return g15uigconf.configure_text_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree, watch_changes)
def configure_radio_from_gconf(gconf_client, gconf_key, widget_ids , gconf_values, default_value, widget_tree, watch_changes = False):
return g15uigconf.configure_radio_from_gconf(gconf_client, gconf_key, widget_ids , gconf_values, default_value, widget_tree, watch_changes)
def configure_adjustment_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree):
g15uigconf.configure_adjustment_from_gconf(gconf_client, gconf_key, widget_id, default_value, widget_tree)
def adjustment_changed(adjustment, key, gconf_client, integer = True):
g15uigconf.adjustment_changed(adjustment, key, gconf_client, integer)
def checkbox_changed(widget, key, gconf_client):
g15uigconf.checkbox_changed(widget, key, gconf_client)
def text_changed(widget, key, gconf_client):
g15uigconf.text_changed(widget, key, gconf_client)
def radio_changed(widget, key, gconf_client, gconf_value):
g15uigconf.radio_changed(widget, key, gconf_client, gconf_value)
def get_float_or_default(gconf_client, key, default = None):
return g15gconf.get_float_or_default(gconf_client, key, default)
def get_string_or_default(gconf_client, key, default = None):
return g15gconf.get_string_or_default(gconf_client, key, default)
def get_bool_or_default(gconf_client, key, default = None):
return g15gconf.get_bool_or_default(gconf_client, key, default)
def get_int_or_default(gconf_client, key, default = None):
return g15gconf.get_int_or_default(gconf_client, key, default)
def get_rgb_or_default(gconf_client, key, default = None):
return g15gconf.get_rgb_or_default(gconf_client, key, default)
def is_gobject_thread():
return g15pythonlang.is_gobject_thread()
def set_gobject_thread():
g15pythonlang.set_gobject_thread()
def get_lsb_release():
return g15os.get_lsb_release()
def get_lsb_distributor():
return g15os.get_lsb_distributor()
def append_if_exists( el, key, val, formatter = "%s"):
return g15pythonlang.append_if_exists( el, key, val, formatter)
def get_command_output( cmd):
return g15os.get_command_output( cmd)
def module_exists(module_name):
return g15pythonlang.module_exists(module_name)
def value_or_empty(d, key):
return g15pythonlang.value_or_empty(d, key)
def value_or_blank(d, key):
return g15pythonlang.value_or_blank(d, key)
def value_or_default(d, key, default_value):
return g15pythonlang.value_or_default(d, key, default_value)
def find(f, seq):
return g15pythonlang.find(f, seq)
def mkdir_p(path):
g15os.mkdir_p(path)
def notify(summary, body = "", icon = "dialog-info", actions = [], hints = {}, timeout = 0):
return g15notify.notify(summary, body, icon, actions, hints, timeout, 0)
def strip_tags(html):
return g15markup.strip_tags(html)
def total_seconds(time_delta):
return g15pythonlang.total_seconds(time_delta)
def rgb_to_uint16(r, g, b):
return g15convert.rgb_to_uint16(r, g, b)
def rgb_to_hex(rgb):
return g15convert.rgb_to_hex(rgb)
def degrees_to_radians(degrees):
return g15convert.degrees_to_radians(degrees)
def rotate(context, degrees):
g15cairo.rotate(context, degrees)
def rotate_around_center(context, width, height, degrees):
g15cairo.rotate_around_center(context, width, height, degrees)
def flip_horizontal(context, width, height):
g15cairo.flip_horizontal(context, width, height)
def flip_vertical(context, width, height):
g15cairo.flip_vertical(context, width, height)
def flip_hv_centered_on(context, fx, fy, cx, cy):
g15cairo.flip_hv_centered_on(context, fx, fy, cx, cy)
def get_cache_filename(filename, size = None):
return g15cairo.get_cache_filename(filename, size)
def get_image_cache_file(filename, size = None):
return g15cairo.get_image_cache_file(filename, size)
def is_url(path):
return g15cairo.is_url(path)
def load_surface_from_file(filename, size = None):
return g15cairo.load_surface_from_file(filename, size)
def load_svg_as_surface(filename, size):
return g15cairo.load_svg_as_surface(filename, size)
def image_to_surface(image, type = "ppm"):
return g15cairo.image_to_surface(image, type)
def pixbuf_to_surface(pixbuf, size = None):
return g15cairo.pixbuf_to_surface(pixbuf, size)
def local_icon_or_default(icon_name, size = 128):
return g15icontools.local_icon_or_default(icon_name, size)
def get_embedded_image_url(path):
return g15icontools.get_embedded_image_url(path)
def get_icon_path(icon = None, size = 128, warning = True, include_missing = True):
return g15icontools.get_icon_path(icon, size, warning, include_missing)
def get_app_icon(gconf_client, icon, size = 128):
return g15icontools.get_app_icon(gconf_client, icon, size)
def get_icon(gconf_client, icon, size = None):
return g15icontools.get_icon(gconf_client, icon, size)
def paint_thumbnail_image(allocated_size, image, canvas):
return g15cairo.paint_thumbnail_image(allocated_size, image, canvas)
def get_scale(target, actual):
return g15cairo.get_scale(target, actual)
def approx_px_to_pt(px):
return g15cairo.approx_px_to_pt(px)
def rotate_element(element, degrees):
g15svg.rotate_element(element, degrees)
def split_args(args):
return g15pythonlang.split_args(args)
def get_transforms(element, position_only = False):
return g15svg.get_transforms(element, position_only)
def get_location(element):
return g15svg.get_location(element)
def get_actual_bounds(element, relative_to = None):
return g15svg.get_actual_bounds(element, relative_to)
def get_bounds(element):
return g15svg.get_bounds(element)
def image_to_pixbuf(im, type = "ppm"):
return g15cairo.image_to_pixbuf(im, type)
def surface_to_pixbuf(surface):
return g15cairo.surface_to_pixbuf(surface)
def get_key_names(keys):
return g15driver.get_key_names(keys)
def html_escape(text):
return g15markup.html_escape(text)
def parse_as_properties(properties_string):
return g15pythonlang.parse_as_properties(properties_string)
def to_int_or_none(s):
return g15pythonlang.to_int_or_none(s)
def to_float_or_none(s):
return g15pythonlang.to_float_or_none(s)
|
jalilm/ryu
|
refs/heads/master
|
tools/install_venv.py
|
56
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for Quantum's development virtualenv
"""
import os
import subprocess
import sys
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
TEST_REQUIRES = os.path.join(ROOT, 'tools', 'test-requires')
PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
VENV_EXISTS = bool(os.path.exists(VENV))
def die(message, *args):
print >> sys.stderr, message % args
sys.exit(1)
def run_command(cmd, redirect_output=True, check_exit_code=True):
"""
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
raise Exception('Command "%s" failed.\n%s' % (' '.join(cmd), output))
return output
HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'],
check_exit_code=False).strip())
HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'],
check_exit_code=False).strip())
def check_dependencies():
"""Make sure virtualenv is in the path."""
if not HAS_VIRTUALENV:
raise Exception('Virtualenv not found. ' + \
'Try installing python-virtualenv')
print 'done.'
def create_virtualenv(venv=VENV, install_pip=False):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
install = ['virtualenv', '-q', venv]
run_command(install)
print 'done.'
print 'Installing pip in virtualenv...',
if install_pip and \
not run_command(['tools/with_venv.sh', 'easy_install',
'pip>1.0']):
die("Failed to install pip.")
print 'done.'
def install_dependencies(venv=VENV):
print 'Installing dependencies with pip (this can take a while)...'
run_command(['tools/with_venv.sh', 'pip', 'install', '-r',
PIP_REQUIRES], redirect_output=False)
run_command(['tools/with_venv.sh', 'pip', 'install', '-r',
TEST_REQUIRES], redirect_output=False)
# Tell the virtual env how to "import quantum"
pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
"quantum.pth")
    f = open(pthfile, 'w')
    f.write("%s\n" % ROOT)
    f.close()
def print_help():
help = """
Quantum development environment setup is complete.
Quantum development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Quantum virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help
def main(argv):
check_dependencies()
create_virtualenv()
install_dependencies()
print_help()
if __name__ == '__main__':
main(sys.argv)
|
flotre/sickbeard-vfvo
|
refs/heads/master
|
sickbeard/scene_exceptions.py
|
30
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import urllib, urllib2, httplib
import sickbeard
from sickbeard import helpers
from sickbeard import name_cache
from sickbeard import logger
from sickbeard import db
try:
import json
except ImportError:
from lib import simplejson as json
from sickbeard.exceptions import ex
def get_scene_exceptions(tvdb_id, season=-1):
"""
Given a tvdb_id, return a list of all the scene exceptions.
"""
myDB = db.DBConnection("cache.db")
exceptions = myDB.select("SELECT show_name FROM scene_exceptions WHERE tvdb_id = ? and season= ?", [tvdb_id, season])
return [cur_exception["show_name"] for cur_exception in exceptions]
def get_scene_exception_by_name(show_name):
"""
Given a show name, return the tvdbid of the exception, None if no exception
is present.
"""
myDB = db.DBConnection("cache.db")
# try the obvious case first
exception_result = myDB.select("SELECT tvdb_id FROM scene_exceptions WHERE LOWER(show_name) = ?", [show_name.lower()])
if exception_result:
return int(exception_result[0]["tvdb_id"])
all_exception_results = myDB.select("SELECT show_name, tvdb_id FROM scene_exceptions")
for cur_exception in all_exception_results:
cur_exception_name = cur_exception["show_name"]
cur_tvdb_id = int(cur_exception["tvdb_id"])
if show_name.lower() in (cur_exception_name.lower(), helpers.sanitizeSceneName(cur_exception_name).lower().replace('.', ' ')):
logger.log(u"Scene exception lookup got tvdb id "+str(cur_tvdb_id)+u", using that", logger.DEBUG)
return cur_tvdb_id
return None
def retrieve_exceptions():
"""
Looks up the exceptions on github, parses them into a dict, and inserts them into the
scene_exceptions table in cache.db. Also clears the scene name cache.
"""
exception_dict = {}
    # exceptions were originally fetched from github pages
    # (http://midgetspy.github.com/sb_tvdb_scene_exceptions/exceptions.txt);
    # this fork reads a local copy of that file instead
    excepfile = os.path.join(os.path.join(sickbeard.PROG_DIR, 'Used_Files'), 'exceptions.txt')
    logger.log(u"Check scene exceptions file to update db")
    f = open(excepfile, "r")
    data = f.read()
    if not data:
        # an empty read means trouble getting the local exceptions file
        logger.log(u"Check scene exceptions update failed. Unable to get file: " + excepfile, logger.ERROR)
        f.close()
        return
else:
# each exception is on one line with the format tvdb_id: 'show name 1', 'show name 2', etc
for cur_line in data.splitlines():
try:
cur_line = cur_line.decode('utf-8')
            except UnicodeDecodeError:
cur_line = cur_line.decode('latin-1')
tvdb_id, sep, aliases = cur_line.partition(':') #@UnusedVariable
if not aliases:
continue
tvdb_id = int(tvdb_id)
# regex out the list of shows, taking \' into account
alias_list = [{re.sub(r'\\(.)', r'\1', x):-1} for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
exception_dict[tvdb_id] = alias_list
    xem_exceptions = _xem_exceptions_fetcher()
exception_dict = dict(xem_exceptions.items() + exception_dict.items())
    if not len(exception_dict):
        logger.log(u"Retrieved exception list is totally empty. Assuming a remote server error; not flushing local data and stopping now")
        f.close()
        return False
myDB = db.DBConnection("cache.db")
# write all the exceptions we got off the net into the database
for cur_tvdb_id in exception_dict:
# get a list of the existing exceptions for this ID
existing_exceptions = [x["show_name"] for x in myDB.select("SELECT * FROM scene_exceptions WHERE tvdb_id = ?", [cur_tvdb_id])]
for cur_exception_dict in exception_dict[cur_tvdb_id]:
# if this exception isn't already in the DB then add it
cur_exception, curSeason = cur_exception_dict.items()[0]
if cur_exception not in existing_exceptions:
myDB.action("INSERT INTO scene_exceptions (tvdb_id, show_name, season) VALUES (?,?,?)", [cur_tvdb_id, cur_exception, curSeason])
name_cache.clearCache()
f.close()
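# Editor's note: a minimal sketch of the line format parsed above, assuming
# the same "tvdb_id: 'name 1', 'name 2'" layout; the sample line is
# hypothetical, not taken from the real exceptions file.
#
#     line = r"12345: 'Show One', 'Show Two \'Alias\''"
#     tvdb_id, sep, aliases = line.partition(':')
#     re.findall(r"'(.*?)(?<!\\)',?", aliases)
#     # -> ["Show One", "Show Two \\'Alias\\'"], then re.sub(r'\\(.)', r'\1', x)
#     # unescapes each alias to its display form.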
def update_scene_exceptions(tvdb_id, scene_exceptions):
"""
Given a tvdb_id, and a list of all show scene exceptions, update the db.
"""
myDB = db.DBConnection("cache.db")
sql_cur_season = myDB.select("SELECT season FROM scene_exceptions WHERE tvdb_id=?", [tvdb_id])
if sql_cur_season:
cur_season = sql_cur_season[0][0]
else:
cur_season =-1
myDB.action('DELETE FROM scene_exceptions WHERE tvdb_id=?', [tvdb_id])
for cur_exception in scene_exceptions:
myDB.action("INSERT INTO scene_exceptions (tvdb_id, show_name, season) VALUES (?,?,?)", [tvdb_id, cur_exception, cur_season])
name_cache.clearCache()
def _xem_exceptions_fetcher():
exception_dict = {}
opener = urllib2.build_opener()
url = "http://thexem.de/map/allNames?origin=tvdb&seasonNumbers=1"
try:
f = opener.open(url)
except (EOFError, IOError), e:
logger.log(u"Unable to connect to XEM. Is thexem.de down ?" + ex(e), logger.ERROR)
return exception_dict
except httplib.InvalidURL, e:
logger.log(u"Invalid XEM host. Is thexem.de down ?: " + ex(e), logger.ERROR)
return exception_dict
    if not f:
        logger.log(u"Empty response from " + url, logger.ERROR)
        return exception_dict
    try:
        xemJson = json.loads(f.read())
    except ValueError, e:
        logger.log(u"Unable to parse JSON from XEM: " + ex(e), logger.ERROR)
        return exception_dict
if xemJson['result'] == 'failure':
return exception_dict
for tvdbid, names in xemJson['data'].items():
exception_dict[int(tvdbid)] = names
logger.log(u"xem exception dict: " + str(exception_dict), logger.DEBUG)
return exception_dict
def getSceneSeasons(tvdb_id):
"""get a list of season numbers that have scene excpetions
"""
myDB = db.DBConnection("cache.db")
seasons = myDB.select("SELECT DISTINCT season FROM scene_exceptions WHERE tvdb_id = ?", [tvdb_id])
return [cur_exception["season"] for cur_exception in seasons]
|
mortonjt/scipy
|
refs/heads/master
|
scipy/weave/examples/print_example.py
|
100
|
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
import inline_tools
import time
def print_compare(n):
print('Printing %d integers:' % n)
t1 = time.time()
for i in range(n):
print(i, end=' ')
t2 = time.time()
py = (t2-t1)
# get it in cache
inline_tools.inline('printf("%d",i);',['i'])
t1 = time.time()
for i in range(n):
inline_tools.inline('printf("%d",i);',['i'])
t2 = time.time()
print(' speed in python:', py)
print(' speed in c:',(t2 - t1))
print(' speed up: %3.2f' % (py/(t2-t1)))
def cout_example(lst):
# get it in cache
i = lst[0]
inline_tools.inline('std::cout << i << std::endl;',['i'])
t1 = time.time()
for i in lst:
inline_tools.inline('std::cout << i << std::endl;',['i'])
t2 = time.time()
if __name__ == "__main__":
n = 3000
print_compare(n)
print("calling cout with integers:")
cout_example([1,2,3])
print("calling cout with strings:")
cout_example(['a','bb', 'ccc'])
|
petrjasek/superdesk-core
|
refs/heads/master
|
superdesk/factory/sentry.py
|
2
|
import logging
from raven.contrib.flask import Sentry
from raven.contrib.celery import register_signal, register_logger_signal
SENTRY_DSN = "SENTRY_DSN"
class SuperdeskSentry:
"""Sentry proxy that will do nothing in case sentry is not configured."""
def __init__(self, app):
if app.config.get(SENTRY_DSN):
if "verify_ssl" not in app.config[SENTRY_DSN]:
app.config[SENTRY_DSN] += "?verify_ssl=0"
app.config.setdefault("SENTRY_NAME", app.config.get("SERVER_DOMAIN"))
self.sentry = Sentry(app, register_signal=False, wrap_wsgi=False, logging=True, level=logging.WARNING)
register_logger_signal(self.sentry.client)
register_signal(self.sentry.client)
else:
self.sentry = None
def captureException(self, exc_info=None, **kwargs):
if self.sentry:
self.sentry.captureException(exc_info, **kwargs)
def captureMessage(self, message, **kwargs):
if self.sentry:
self.sentry.captureMessage(message, **kwargs)
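# Editor's note: a hypothetical usage sketch, assuming a configured Flask app;
# with no SENTRY_DSN in app.config the proxy silently swallows both calls.
#
#     sentry = SuperdeskSentry(app)
#     try:
#         risky_operation()  # hypothetical
#     except Exception:
#         sentry.captureException()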
|
sapcc/monasca-api
|
refs/heads/master
|
monasca_api/v2/reference/metrics.py
|
1
|
# (C) Copyright 2014-2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import falcon
import math
import monasca_api.monitoring.client as monitoring_client
from monasca_common.simport import simport
from monasca_common.validation import metrics as metric_validation
from oslo_config import cfg
from oslo_log import log
from monasca_api.api import metrics_api_v2
from monasca_api.common.messaging import (
exceptions as message_queue_exceptions)
from monasca_api.common.messaging.message_formats import (
metrics as metrics_message)
from monasca_api.monitoring.metrics import METRICS_PUBLISH_TIME, METRICS_LIST_TIME, METRICS_STATS_TIME, \
METRICS_RETRIEVE_TIME, METRICS_DIMS_RETRIEVE_TIME, METRICS_REJECTED_COUNT
from monasca_api.v2.common.exceptions import HTTPUnprocessableEntityError
from monasca_api.v2.reference import helpers
from monasca_api.v2.reference import resource
LOG = log.getLogger(__name__)
STATSD_CLIENT = monitoring_client.get_client()
STATSD_TIMER = STATSD_CLIENT.get_timer()
def get_merge_metrics_flag(req):
    '''Return the value of the optional merge_metrics flag
Returns False if merge_metrics parameter is not supplied or is not a
string that evaluates to True, otherwise True
'''
merge_metrics_flag = helpers.get_query_param(req,
'merge_metrics',
False,
False)
if merge_metrics_flag is not False:
return helpers.str_2_bool(merge_metrics_flag)
else:
return False
class Metrics(metrics_api_v2.MetricsV2API):
def __init__(self):
try:
super(Metrics, self).__init__()
self._region = cfg.CONF.region
self._delegate_authorized_roles = (
cfg.CONF.security.delegate_authorized_roles)
self._get_metrics_authorized_roles = (
cfg.CONF.security.default_authorized_roles +
cfg.CONF.security.read_only_authorized_roles)
self._post_metrics_authorized_roles = (
cfg.CONF.security.default_authorized_roles +
cfg.CONF.security.agent_authorized_roles)
self._message_queue = simport.load(cfg.CONF.messaging.driver)(
'metrics')
self._metrics_repo = simport.load(
cfg.CONF.repositories.metrics_driver)()
except Exception as ex:
LOG.exception(ex)
raise falcon.HTTPInternalServerError('Service unavailable',
ex.message)
self._statsd_rejected_count = STATSD_CLIENT.get_counter(METRICS_REJECTED_COUNT)
def _send_metrics(self, metrics):
try:
self._message_queue.send_message(metrics)
except message_queue_exceptions.MessageQueueException as ex:
LOG.exception(ex)
raise falcon.HTTPServiceUnavailable('Service unavailable',
ex.message, 60)
def _list_metrics(self, tenant_id, name, dimensions, req_uri, offset,
limit, start_timestamp, end_timestamp):
result = self._metrics_repo.list_metrics(tenant_id,
self._region,
name,
dimensions,
offset, limit,
start_timestamp,
end_timestamp)
return helpers.paginate(result, req_uri, limit)
@resource.resource_try_catch_block
@STATSD_TIMER.timed(METRICS_PUBLISH_TIME, sample_rate=0.01)
def on_post(self, req, res):
helpers.validate_json_content_type(req)
helpers.validate_authorization(req,
self._post_metrics_authorized_roles)
metrics = helpers.read_http_resource(req)
try:
metric_validation.validate(metrics)
except Exception as ex:
LOG.exception(ex)
self._statsd_rejected_count.increment(1)
raise HTTPUnprocessableEntityError("Unprocessable Entity", ex.message)
tenant_id = (
helpers.get_x_tenant_or_tenant_id(req,
self._delegate_authorized_roles))
transformed_metrics = metrics_message.transform(
metrics, tenant_id, self._region)
self._send_metrics(transformed_metrics)
res.status = falcon.HTTP_204
@resource.resource_try_catch_block
@STATSD_TIMER.timed(METRICS_LIST_TIME)
def on_get(self, req, res):
helpers.validate_authorization(req, self._get_metrics_authorized_roles)
tenant_id = (
helpers.get_x_tenant_or_tenant_id(req,
self._delegate_authorized_roles))
name = helpers.get_query_name(req)
helpers.validate_query_name(name)
dimensions = helpers.get_query_dimensions(req)
helpers.validate_query_dimensions(dimensions)
offset = helpers.get_query_param(req, 'offset')
start_timestamp = helpers.get_query_starttime_timestamp(req, False)
end_timestamp = helpers.get_query_endtime_timestamp(req, False)
helpers.validate_start_end_timestamps(start_timestamp, end_timestamp)
result = self._list_metrics(tenant_id, name,
dimensions, req.uri,
offset, req.limit,
start_timestamp, end_timestamp)
res.body = helpers.dumpit_utf8(result)
res.status = falcon.HTTP_200
class MetricsMeasurements(metrics_api_v2.MetricsMeasurementsV2API):
def __init__(self):
try:
super(MetricsMeasurements, self).__init__()
self._region = cfg.CONF.region
self._delegate_authorized_roles = (
cfg.CONF.security.delegate_authorized_roles)
self._get_metrics_authorized_roles = (
cfg.CONF.security.default_authorized_roles +
cfg.CONF.security.read_only_authorized_roles)
self._post_metrics_authorized_roles = (
cfg.CONF.security.default_authorized_roles +
cfg.CONF.security.agent_authorized_roles)
self._metrics_repo = simport.load(
cfg.CONF.repositories.metrics_driver)()
except Exception as ex:
LOG.exception(ex)
raise falcon.HTTPInternalServerError('Service unavailable',
ex.message)
@resource.resource_try_catch_block
@STATSD_TIMER.timed(METRICS_RETRIEVE_TIME)
def on_get(self, req, res):
helpers.validate_authorization(req, self._get_metrics_authorized_roles)
tenant_id = (
helpers.get_x_tenant_or_tenant_id(req,
self._delegate_authorized_roles))
name = helpers.get_query_name(req, True)
helpers.validate_query_name(name)
dimensions = helpers.get_query_dimensions(req)
helpers.validate_query_dimensions(dimensions)
start_timestamp = helpers.get_query_starttime_timestamp(req)
end_timestamp = helpers.get_query_endtime_timestamp(req, False)
helpers.validate_start_end_timestamps(start_timestamp, end_timestamp)
offset = helpers.get_query_param(req, 'offset')
merge_metrics_flag = get_merge_metrics_flag(req)
group_by = helpers.get_query_group_by(req)
result = self._measurement_list(tenant_id, name, dimensions,
start_timestamp, end_timestamp,
req.uri, offset,
req.limit, merge_metrics_flag,
group_by)
res.body = helpers.dumpit_utf8(result)
res.status = falcon.HTTP_200
def _measurement_list(self, tenant_id, name, dimensions, start_timestamp,
end_timestamp, req_uri, offset,
limit, merge_metrics_flag, group_by):
result = self._metrics_repo.measurement_list(tenant_id,
self._region,
name,
dimensions,
start_timestamp,
end_timestamp,
offset,
limit,
merge_metrics_flag,
group_by)
return helpers.paginate_measurements(result, req_uri, limit)
class MetricsStatistics(metrics_api_v2.MetricsStatisticsV2API):
def __init__(self):
try:
super(MetricsStatistics, self).__init__()
self._region = cfg.CONF.region
self._delegate_authorized_roles = (
cfg.CONF.security.delegate_authorized_roles)
self._get_metrics_authorized_roles = (
cfg.CONF.security.default_authorized_roles +
cfg.CONF.security.read_only_authorized_roles)
self._metrics_repo = simport.load(
cfg.CONF.repositories.metrics_driver)()
except Exception as ex:
LOG.exception(ex)
raise falcon.HTTPInternalServerError('Service unavailable',
ex.message)
@resource.resource_try_catch_block
@STATSD_TIMER.timed(METRICS_STATS_TIME)
def on_get(self, req, res):
helpers.validate_authorization(req, self._get_metrics_authorized_roles)
tenant_id = (
helpers.get_x_tenant_or_tenant_id(req,
self._delegate_authorized_roles))
name = helpers.get_query_name(req, True)
helpers.validate_query_name(name)
dimensions = helpers.get_query_dimensions(req)
helpers.validate_query_dimensions(dimensions)
start_timestamp = helpers.get_query_starttime_timestamp(req)
end_timestamp = helpers.get_query_endtime_timestamp(req, False)
helpers.validate_start_end_timestamps(start_timestamp, end_timestamp)
statistics = helpers.get_query_statistics(req)
period = helpers.get_query_period(req)
offset = helpers.get_query_param(req, 'offset')
merge_metrics_flag = get_merge_metrics_flag(req)
group_by = helpers.get_query_group_by(req)
result = self._metric_statistics(tenant_id, name, dimensions,
start_timestamp, end_timestamp,
statistics, period, req.uri,
offset, req.limit, merge_metrics_flag,
group_by)
res.body = helpers.dumpit_utf8(result)
res.status = falcon.HTTP_200
def _metric_statistics(self, tenant_id, name, dimensions, start_timestamp,
end_timestamp, statistics, period, req_uri,
offset, limit, merge_metrics_flag, group_by):
result = self._metrics_repo.metrics_statistics(tenant_id,
self._region,
name,
dimensions,
start_timestamp,
end_timestamp,
statistics, period,
offset,
limit,
merge_metrics_flag,
group_by)
return helpers.paginate_statistics(result, req_uri, limit)
class MetricsNames(metrics_api_v2.MetricsNamesV2API):
def __init__(self):
try:
super(MetricsNames, self).__init__()
self._region = cfg.CONF.region
self._delegate_authorized_roles = (
cfg.CONF.security.delegate_authorized_roles)
self._get_metrics_authorized_roles = (
cfg.CONF.security.default_authorized_roles +
cfg.CONF.security.read_only_authorized_roles)
self._metrics_repo = simport.load(
cfg.CONF.repositories.metrics_driver)()
except Exception as ex:
LOG.exception(ex)
raise falcon.HTTPInternalServerError('Service unavailable',
ex.message)
@resource.resource_try_catch_block
def on_get(self, req, res):
helpers.validate_authorization(req, self._get_metrics_authorized_roles)
tenant_id = (
helpers.get_x_tenant_or_tenant_id(req,
self._delegate_authorized_roles))
dimensions = helpers.get_query_dimensions(req)
helpers.validate_query_dimensions(dimensions)
offset = helpers.get_query_param(req, 'offset')
result = self._list_metric_names(tenant_id, dimensions,
req.uri, offset, req.limit)
res.body = helpers.dumpit_utf8(result)
res.status = falcon.HTTP_200
def _list_metric_names(self, tenant_id, dimensions, req_uri, offset,
limit):
result = self._metrics_repo.list_metric_names(tenant_id,
self._region,
dimensions)
return helpers.paginate_with_no_id(result, req_uri, offset, limit)
class DimensionValues(metrics_api_v2.DimensionValuesV2API):
def __init__(self):
try:
super(DimensionValues, self).__init__()
self._region = cfg.CONF.region
self._delegate_authorized_roles = (
cfg.CONF.security.delegate_authorized_roles)
self._get_metrics_authorized_roles = (
cfg.CONF.security.default_authorized_roles +
cfg.CONF.security.read_only_authorized_roles)
self._metrics_repo = simport.load(
cfg.CONF.repositories.metrics_driver)()
except Exception as ex:
LOG.exception(ex)
raise falcon.HTTPInternalServerError('Service unavailable',
ex.message)
@resource.resource_try_catch_block
@STATSD_TIMER.timed(METRICS_DIMS_RETRIEVE_TIME)
def on_get(self, req, res):
helpers.validate_authorization(req, self._get_metrics_authorized_roles)
tenant_id = (
helpers.get_x_tenant_or_tenant_id(req,
self._delegate_authorized_roles))
metric_name = helpers.get_query_param(req, 'metric_name')
dimension_name = helpers.get_query_param(req, 'dimension_name',
required=True)
offset = helpers.get_query_param(req, 'offset')
result = self._dimension_values(tenant_id, req.uri, metric_name,
dimension_name, offset, req.limit)
res.body = helpers.dumpit_utf8(result)
res.status = falcon.HTTP_200
def _dimension_values(self, tenant_id, req_uri, metric_name,
dimension_name, offset, limit):
result = self._metrics_repo.list_dimension_values(tenant_id,
self._region,
metric_name,
dimension_name)
return helpers.paginate_with_no_id(result, req_uri, offset, limit)
class DimensionNames(metrics_api_v2.DimensionNamesV2API):
def __init__(self):
try:
super(DimensionNames, self).__init__()
self._region = cfg.CONF.region
self._delegate_authorized_roles = (
cfg.CONF.security.delegate_authorized_roles)
self._get_metrics_authorized_roles = (
cfg.CONF.security.default_authorized_roles +
cfg.CONF.security.read_only_authorized_roles)
self._metrics_repo = simport.load(
cfg.CONF.repositories.metrics_driver)()
except Exception as ex:
LOG.exception(ex)
raise falcon.HTTPInternalServerError('Service unavailable',
ex.message)
@resource.resource_try_catch_block
def on_get(self, req, res):
helpers.validate_authorization(req, self._get_metrics_authorized_roles)
tenant_id = (
helpers.get_x_tenant_or_tenant_id(req,
self._delegate_authorized_roles))
metric_name = helpers.get_query_param(req, 'metric_name')
offset = helpers.get_query_param(req, 'offset')
result = self._dimension_names(tenant_id, req.uri, metric_name,
offset, req.limit)
res.body = helpers.dumpit_utf8(result)
res.status = falcon.HTTP_200
def _dimension_names(self, tenant_id, req_uri, metric_name, offset, limit):
result = self._metrics_repo.list_dimension_names(tenant_id,
self._region,
metric_name)
return helpers.paginate_with_no_id(result, req_uri, offset, limit)
|
TangXT/GreatCatMOOC
|
refs/heads/master
|
common/djangoapps/cache_toolbox/middleware.py
|
211
|
"""
Cache-backed ``AuthenticationMiddleware``
-----------------------------------------
``CacheBackedAuthenticationMiddleware`` is a
``django.contrib.auth.middleware.AuthenticationMiddleware`` replacement to
avoid querying the database for a ``User`` instance in each request.
Whilst the built-in ``AuthenticationMiddleware`` mechanism will only obtain the
``User`` instance when it is required, the vast majority of sites will do so on
every page to render "Logged in as 'X'" text as well as to evaluate the result of
``user.is_authenticated()`` and ``user.is_superuser`` to provide conditional
functionality.
This middleware eliminates the cost of retrieving this ``User`` instance by
caching it using the ``cache_toolbox`` instance caching mechanisms.
Depending on your average number of queries per page, saving one query per
request can---in aggregate---reduce load on your database. In addition,
avoiding the database entirely for pages can avoid incurring any connection
latency in your environment, resulting in faster page loads for your users.
Saving this data in the cache can also be used as a way of authenticating users
in systems outside of Django that should not access your database. For
example, a "maintenance mode" page would be able to render a personalised
message without touching the database at all but rather authenticating via the
cache.
``CacheBackedAuthenticationMiddleware`` is ``AUTHENTICATION_BACKENDS`` agnostic.
Implementation
~~~~~~~~~~~~~~
The cache and session backends are still accessed on each request - we are
simply assuming that they are cheaper (or otherwise more preferable) to access
than your database. (In the future, signed cookies may allow us to avoid this
lookup altogether -- whilst we could not safely save ``User.password`` in a
cookie, we could use delayed loading to pull it out when needed.)
Another alternative solution would be to store the attributes in the user's
session instead of in the cache. This would save the cache hit on every request
as all the relevant data would be pulled in one go from the session backend.
However, this has two main disadvantages:
* Session keys are not deterministic -- after making changes to an
``auth_user`` row in the database, you cannot determine the user's session
key to flush the now out-of-sync data (and doing so would log them out
anyway).
* Stores data per-session rather than per-user -- if a user logs in from
multiple computers the data is duplicated in each session. This problem is
compounded by most projects wishing to avoid expiring session data as long
as possible (in addition to storing sessions in persistent stores).
Usage
~~~~~
To use, find ``MIDDLEWARE_CLASSES`` in your ``settings.py`` and replace::
MIDDLEWARE_CLASSES = [
...
'django.contrib.auth.middleware.AuthenticationMiddleware',
...
]
with::
MIDDLEWARE_CLASSES = [
...
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
...
]
You should confirm you are using a ``SESSION_ENGINE`` that doesn't query the
database for each request. The built-in ``cached_db`` engine is the safest
choice for most environments but you may be happy with the trade-offs of the
``memcached`` backend - see the Django documentation for more details.
"""
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import User
from django.contrib.auth.middleware import AuthenticationMiddleware
from .model import cache_model
class CacheBackedAuthenticationMiddleware(AuthenticationMiddleware):
def __init__(self):
cache_model(User)
def process_request(self, request):
try:
# Try and construct a User instance from data stored in the cache
request.user = User.get_cached(request.session[SESSION_KEY])
        except Exception:
# Fallback to constructing the User from the database.
super(CacheBackedAuthenticationMiddleware, self).process_request(request)
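# Editor's note: a minimal sketch of the cached lookup this middleware relies
# on, assuming cache_model(User) has been applied (as in __init__ above); the
# primary key value is hypothetical.
#
#     cache_model(User)           # installs User.get_cached
#     user = User.get_cached(42)  # cache hit skips the auth_user query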
|
mammique/django
|
refs/heads/tp_alpha
|
django/contrib/formtools/tests/wizard/wizardtests/forms.py
|
313
|
import os
import tempfile
from django import forms
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.formtools.wizard.views import WizardView
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
class Page1(forms.Form):
name = forms.CharField(max_length=100)
user = forms.ModelChoiceField(queryset=User.objects.all())
thirsty = forms.NullBooleanField()
class Page2(forms.Form):
address1 = forms.CharField(max_length=100)
address2 = forms.CharField(max_length=100)
file1 = forms.FileField()
class Page3(forms.Form):
random_crap = forms.CharField(max_length=100)
Page4 = formset_factory(Page3, extra=2)
class ContactWizard(WizardView):
file_storage = temp_storage
def done(self, form_list, **kwargs):
c = Context({
'form_list': [x.cleaned_data for x in form_list],
'all_cleaned_data': self.get_all_cleaned_data(),
})
for form in self.form_list.keys():
c[form] = self.get_cleaned_data_for_step(form)
c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
return HttpResponse(Template('').render(c))
def get_context_data(self, form, **kwargs):
context = super(ContactWizard, self).get_context_data(form, **kwargs)
if self.storage.current_step == 'form2':
context.update({'another_var': True})
return context
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email')
UserFormSet = modelformset_factory(User, form=UserForm)
class SessionContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
|
pdarragh/Viper
|
refs/heads/master
|
viper/parser/grammar_parsing/tokenize/grammar_token.py
|
1
|
from typing import Type
class GrammarToken:
    def __init__(self, lexeme_class: Type, text=None):
self._lexeme_class = lexeme_class
self._text = text
def __eq__(self, other):
if isinstance(other, GrammarToken):
return self._lexeme_class == other._lexeme_class
return isinstance(other, self._lexeme_class)
def __str__(self):
if self._text is not None:
return self._text
else:
return f'{self._lexeme_class.__name__}Token'
def __repr__(self):
return str(self)
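# Editor's note: a hypothetical usage sketch; NameLexeme stands in for any
# lexeme class from the surrounding parser package.
#
#     class NameLexeme: pass
#     tok = GrammarToken(NameLexeme)
#     tok == GrammarToken(NameLexeme)   # True: same lexeme class
#     tok == NameLexeme()               # True: instances compare equal too
#     str(tok)                          # 'NameLexemeToken'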
|
fortizc/fiware-orion
|
refs/heads/develop
|
test/acceptance/lettuce/integration/terrain.py
|
8
|
# -*- coding: utf-8 -*-
"""
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U
#
# This file is part of Orion Context Broker.
#
# Orion Context Broker is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Orion Context Broker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Orion Context Broker. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# iot_support at tid dot es
"""
__author__ = 'Jon Calderin Goñi (jon.caldering@gmail.com)'
from lettuce import before, world, after
from integration.tools.general_utils import stop_mock, drop_all_test_databases, check_properties, get_cb_pid, stop_cb
from iotqautils.iotqaLogger import get_logger
@before.all
def before_all():
world.log = get_logger('lettuce', world.config['environment']['log_level'], True, True, 'logs/lettuce.log')
check_properties()
world.entities = None
world.attributes_consult = None
world.attributes_creation = None
world.context_elements = None
world.notify_conditions = None
world.context_registrations = None
world.service = None
world.subservice = None
world.mock = None
world.mock_data = None
world.payloads_count = -1
world.response_count = -1
world.cb_count = -1
world.cb = {}
world.payloads = {}
world.responses = {}
world.cb_config_to_start = ''
world.cb_pid = get_cb_pid()
world.bin_parms = None
drop_all_test_databases(world.config['mongo']['host'], int(world.config['mongo']['port']))
@after.each_scenario
def after_each_scenario(scenario):
stop_mock()
world.cb[world.cb_count].log = None
@after.all
def after_all(total):
drop_all_test_databases(world.config['mongo']['host'], int(world.config['mongo']['port']))
stop_cb()
|
AlbertoAlfredo/exercicios-cursos
|
refs/heads/master
|
Curso-em-video/Python/aulas-python/Desafios/desafio043.py
|
1
|
peso = float(input("Digite seu peso: "))
altura = float(input("Digite sua altura: "))
imc = peso/(altura ** 2)
print("Seu IMC é {:.1f}".format(imc))
if imc < 18.5 :
print("Você está abaixo do peso.")
elif imc <= 25 :
print("Você está no peso ideal.")
elif imc <= 30 :
print("Você está com sobrepeso.")
elif imc <= 40 :
print("Você está obeso.")
else:
print("Você está com obesidade mórbida.")
|
tarzan0820/odoo
|
refs/heads/8.0
|
addons/account_payment/wizard/account_payment_pay.py
|
382
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
# TODO: remove - this wizard is not used
class account_payment_make_payment(osv.osv_memory):
_name = "account.payment.make.payment"
_description = "Account make payment"
def launch_wizard(self, cr, uid, ids, context=None):
"""
Search for a wizard to launch according to the type.
If type is manual. just confirm the order.
"""
obj_payment_order = self.pool.get('payment.order')
if context is None:
context = {}
# obj_model = self.pool.get('ir.model.data')
# obj_act = self.pool.get('ir.actions.act_window')
# order = obj_payment_order.browse(cr, uid, context['active_id'], context)
obj_payment_order.set_done(cr, uid, [context['active_id']], context)
return {'type': 'ir.actions.act_window_close'}
# t = order.mode and order.mode.type.code or 'manual'
# if t == 'manual':
# obj_payment_order.set_done(cr,uid,context['active_id'],context)
# return {}
#
# gw = obj_payment_order.get_wizard(t)
# if not gw:
# obj_payment_order.set_done(cr,uid,context['active_id'],context)
# return {}
#
# module, wizard= gw
# result = obj_model._get_id(cr, uid, module, wizard)
# id = obj_model.read(cr, uid, [result], ['res_id'])[0]['res_id']
# return obj_act.read(cr, uid, [id])[0]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mxOBS/deb-pkg_trusty_chromium-browser
|
refs/heads/master
|
chrome/test/ispy/common/mock_cloud_bucket.py
|
122
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Subclass of CloudBucket used for testing."""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
import cloud_bucket
class MockCloudBucket(cloud_bucket.BaseCloudBucket):
"""Subclass of CloudBucket used for testing."""
def __init__(self):
"""Initializes the MockCloudBucket with its datastore.
Returns:
An instance of MockCloudBucket.
"""
self.datastore = {}
def Reset(self):
"""Clears the MockCloudBucket's datastore."""
self.datastore = {}
# override
def UploadFile(self, path, contents, content_type):
self.datastore[path] = contents
# override
def DownloadFile(self, path):
    if path in self.datastore:
return self.datastore[path]
else:
raise cloud_bucket.FileNotFoundError
# override
def UpdateFile(self, path, contents):
if not self.FileExists(path):
raise cloud_bucket.FileNotFoundError
self.UploadFile(path, contents, '')
# override
def RemoveFile(self, path):
    if path in self.datastore:
self.datastore.pop(path)
# override
def FileExists(self, path):
    return path in self.datastore
# override
def GetImageURL(self, path):
    if path in self.datastore:
return path
else:
raise cloud_bucket.FileNotFoundError
# override
def GetAllPaths(self, prefix):
return (item[0] for item in self.datastore.items()
if item[0].startswith(prefix))
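# Editor's note: a minimal usage sketch; the path and contents are
# hypothetical.
#
#     bucket = MockCloudBucket()
#     bucket.UploadFile('expected/home.png', 'PNG...', 'image/png')
#     bucket.FileExists('expected/home.png')    # True
#     bucket.DownloadFile('expected/home.png')  # 'PNG...'
#     bucket.RemoveFile('expected/home.png')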
|
interstar/OWL
|
refs/heads/master
|
web.py
|
1
|
#!/usr/bin/env python
"""web.py: makes web apps (http://webpy.org)"""
__version__ = "0.1381"
__revision__ = "$Rev: 72 $"
__license__ = "public domain"
__author__ = "Aaron Swartz <me@aaronsw.com>"
__contributors__ = "see http://webpy.org/changes"
# todo:
# - get rid of upvars
# - break up into separate files
# - provide an option to use .write()
# - allow people to do $self.id from inside a reparam
# - add sqlite support
# - convert datetimes, floats in WebSafe
# - locks around memoize
# - fix memoize to use cacheify style techniques
# - merge curval query with the insert
# - figure out how to handle squid, etc. for web.ctx.ip
import os, os.path, sys, time, types, traceback, threading
import cgi, re, urllib, urlparse, Cookie, pprint
from threading import currentThread
from tokenize import tokenprog
iters = (list, tuple)
if hasattr(__builtins__, 'set') or (
hasattr(__builtins__, 'has_key') and __builtins__.has_key('set')):
iters += (set,)
try:
from sets import Set
iters += (Set,)
except ImportError:
pass
try:
import datetime, itertools
except ImportError:
pass
try:
from Cheetah.Compiler import Compiler
from Cheetah.Filters import Filter
_hasTemplating = True
except ImportError:
_hasTemplating = False
try:
from DBUtils.PooledDB import PooledDB
_hasPooling = True
except ImportError:
_hasPooling = False
# hack for compatibility with Python 2.3:
if not hasattr(traceback, 'format_exc'):
from cStringIO import StringIO
def format_exc(limit=None):
strbuf = StringIO()
traceback.print_exc(limit, strbuf)
return strbuf.getvalue()
traceback.format_exc = format_exc
## General Utilities
def _strips(direction, text, remove):
if direction == 'l':
if text.startswith(remove):
return text[len(remove):]
elif direction == 'r':
if text.endswith(remove):
return text[:-len(remove)]
else:
raise ValueError, "Direction needs to be r or l."
return text
def rstrips(text, remove):
"""removes the string `remove` from the right of `text`"""
return _strips('r', text, remove)
def lstrips(text, remove):
"""removes the string `remove` from the left of `text`"""
return _strips('l', text, remove)
def strips(text, remove):
"""removes the string `remove` from the both sides of `text`"""
return rstrips(lstrips(text, remove), remove)
def autoassign(self, locals):
"""
Automatically assigns local variables to `self`.
Generally used in `__init__` methods, as in:
def __init__(self, foo, bar, baz=1): autoassign(self, locals())
"""
#locals = sys._getframe(1).f_locals
#self = locals['self']
for (key, value) in locals.iteritems():
if key == 'self':
continue
setattr(self, key, value)
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
instead of `obj['foo']`. Create one by doing `storage({'a':1})`.
"""
def __getattr__(self, key):
if self.has_key(key):
return self[key]
raise AttributeError, repr(key)
def __setattr__(self, key, value):
self[key] = value
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
def storify(mapping, *requireds, **defaults):
"""
    Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
    `mapping` doesn't have all of the keys in `requireds` and using the default
values for keys found in `defaults`.
For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
`storage({'a':1, 'b':2, 'c':3})`.
If a `storify` value is a list (e.g. multiple values in a form submission),
`storify` returns the last element of the list, unless the key appears in
`defaults` as a list. Thus:
>>> storify({'a':[1, 2]}).a
2
>>> storify({'a':[1, 2]}, a=[]).a
[1, 2]
>>> storify({'a':1}, a=[]).a
[1]
>>> storify({}, a=[]).a
[]
    Similarly, if the value has a `value` attribute, `storify` will return _its_
value, unless the key appears in `defaults` as a dictionary.
>>> storify({'a':storage(value=1)}).a
1
>>> storify({'a':storage(value=1)}, a={}).a
<Storage {'value': 1}>
>>> storify({}, a={}).a
{}
"""
def getvalue(x):
if hasattr(x, 'value'):
return x.value
else:
return x
stor = Storage()
for key in requireds + tuple(mapping.keys()):
value = mapping[key]
if isinstance(value, list):
if isinstance(defaults.get(key), list):
value = [getvalue(x) for x in value]
else:
value = value[-1]
if not isinstance(defaults.get(key), dict):
value = getvalue(value)
if isinstance(defaults.get(key), list) and not isinstance(value, list):
value = [value]
setattr(stor, key, value)
for (key, value) in defaults.iteritems():
result = value
if hasattr(stor, key):
result = stor[key]
if value == () and not isinstance(result, tuple):
result = (result,)
setattr(stor, key, result)
return stor
class Memoize:
"""
'Memoizes' a function, caching its return values for each input.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args, **keywords):
key = (args, tuple(keywords.items()))
if key not in self.cache:
self.cache[key] = self.func(*args, **keywords)
return self.cache[key]
memoize = Memoize
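# Editor's note: a minimal usage sketch (slow_add is hypothetical); repeated
# calls with the same arguments are served from Memoize.cache instead of
# re-running the function.
#
#     def slow_add(a, b):
#         time.sleep(1)
#         return a + b
#     slow_add = memoize(slow_add)
#     slow_add(1, 2)   # takes ~1s the first time
#     slow_add(1, 2)   # instant: cached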
re_compile = memoize(re.compile) #@@ threadsafe?
re_compile.__doc__ = """
A memoized version of re.compile.
"""
class _re_subm_proxy:
def __init__(self):
self.match = None
def __call__(self, match):
self.match = match
return ''
def re_subm(pat, repl, string):
"""Like re.sub, but returns the replacement _and_ the match object."""
compiled_pat = re_compile(pat)
proxy = _re_subm_proxy()
compiled_pat.sub(proxy.__call__, string)
return compiled_pat.sub(repl, string), proxy.match
def group(seq, size):
"""
Returns an iterator over a series of lists of length size from iterable.
For example, `list(group([1,2,3,4], 2))` returns `[[1,2],[3,4]]`.
"""
if not hasattr(seq, 'next'):
seq = iter(seq)
while True:
yield [seq.next() for i in xrange(size)]
class IterBetter:
"""
Returns an object that can be used as an iterator
but can also be used via __getitem__ (although it
cannot go backwards -- that is, you cannot request
`iterbetter[0]` after requesting `iterbetter[1]`).
"""
def __init__(self, iterator):
self.i, self.c = iterator, 0
def __iter__(self):
while 1:
yield self.i.next()
self.c += 1
def __getitem__(self, i):
#todo: slices
if i > self.c:
raise IndexError, "already passed "+str(i)
try:
while i < self.c:
self.i.next()
self.c += 1
# now self.c == i
self.c += 1
return self.i.next()
except StopIteration:
raise IndexError, str(i)
iterbetter = IterBetter
def dictreverse(mapping):
"""Takes a dictionary like `{1:2, 3:4}` and returns `{2:1, 4:3}`."""
return dict([(value, key) for (key, value) in mapping.iteritems()])
def dictfind(dictionary, element):
"""
Returns a key whose value in `dictionary` is `element`
or, if none exists, None.
"""
for (key, value) in dictionary.iteritems():
if element is value:
return key
def dictfindall(dictionary, element):
"""
Returns the keys whose values in `dictionary` are `element`
or, if none exists, [].
"""
res = []
for (key, value) in dictionary.iteritems():
if element is value:
res.append(key)
return res
def dictincr(dictionary, element):
"""
Increments `element` in `dictionary`,
setting it to one if it doesn't exist.
"""
dictionary.setdefault(element, 0)
dictionary[element] += 1
return dictionary[element]
def dictadd(dict_a, dict_b):
"""
Returns a dictionary consisting of the keys in `a` and `b`.
If they share a key, the value from b is used.
"""
result = {}
result.update(dict_a)
result.update(dict_b)
return result
sumdicts = dictadd # deprecated
def listget(lst, ind, default=None):
"""Returns `lst[ind]` if it exists, `default` otherwise."""
if len(lst)-1 < ind:
return default
return lst[ind]
def intget(integer, default=None):
"""Returns `integer` as an int or `default` if it can't."""
try:
return int(integer)
except (TypeError, ValueError):
return default
def datestr(then, now=None):
"""Converts a (UTC) datetime object to a nice string representation."""
def agohence(n, what, divisor=None):
if divisor: n = n // divisor
out = str(abs(n)) + ' ' + what # '2 day'
if abs(n) != 1: out += 's' # '2 days'
out += ' ' # '2 days '
if n < 0:
out += 'from now'
else:
out += 'ago'
return out # '2 days ago'
oneday = 24 * 60 * 60
if not now: now = datetime.datetime.utcnow()
delta = now - then
deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
deltadays = abs(deltaseconds) // oneday
if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor
if deltadays:
if abs(deltadays) < 4:
return agohence(deltadays, 'day')
out = then.strftime('%B %e') # e.g. 'June 13'
if then.year != now.year or deltadays < 0:
out += ', %s' % then.year
return out
if int(deltaseconds):
if abs(deltaseconds) > (60 * 60):
return agohence(deltaseconds, 'hour', 60 * 60)
elif abs(deltaseconds) > 60:
return agohence(deltaseconds, 'minute', 60)
else:
return agohence(deltaseconds, 'second')
deltamicroseconds = delta.microseconds
if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
if abs(deltamicroseconds) > 1000:
return agohence(deltamicroseconds, 'millisecond', 1000)
return agohence(deltamicroseconds, 'microsecond')
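# Editor's note: a hypothetical usage sketch; passing `now` explicitly makes
# the output deterministic.
#
#     now = datetime.datetime(2007, 6, 15)
#     datestr(now - datetime.timedelta(days=2), now)     # '2 days ago'
#     datestr(now + datetime.timedelta(minutes=5), now)  # '5 minutes from now'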
def upvars(level=2):
"""Guido van Rossum doesn't want you to use this function."""
return dictadd(
sys._getframe(level).f_globals,
sys._getframe(level).f_locals)
class CaptureStdout:
"""
Captures everything func prints to stdout and returns it instead.
**WARNING:** Not threadsafe!
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **keywords):
from cStringIO import StringIO
# Not threadsafe!
out = StringIO()
oldstdout = sys.stdout
sys.stdout = out
try:
self.func(*args, **keywords)
finally:
sys.stdout = oldstdout
return out.getvalue()
capturestdout = CaptureStdout
class Profile:
"""
Profiles `func` and returns a tuple containing its output
and a string with human-readable profiling information.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args): ##, **kw): kw unused
import hotshot, hotshot.stats, tempfile ##, time already imported
temp = tempfile.NamedTemporaryFile()
prof = hotshot.Profile(temp.name)
stime = time.time()
result = prof.runcall(self.func, *args)
stime = time.time() - stime
prof.close()
stats = hotshot.stats.load(temp.name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
x = '\n\ntook '+ str(stime) + ' seconds\n'
x += capturestdout(stats.print_stats)(40)
x += capturestdout(stats.print_callers)()
return result, x
profile = Profile
def tryall(context, prefix=None):
"""
Tries a series of functions and prints their results.
`context` is a dictionary mapping names to values;
the value will only be tried if it's callable.
For example, you might have a file `test/stuff.py`
with a series of functions testing various things in it.
At the bottom, have a line:
if __name__ == "__main__": tryall(globals())
Then you can run `python test/stuff.py` and get the results of
all the tests.
"""
context = context.copy() # vars() would update
results = {}
for (key, value) in context.iteritems():
if not hasattr(value, '__call__'):
continue
if prefix and not key.startswith(prefix):
continue
print key + ':',
try:
r = value()
dictincr(results, r)
print r
except:
print 'ERROR'
dictincr(results, 'ERROR')
print ' ' + '\n '.join(traceback.format_exc().split('\n'))
print '-'*40
print 'results:'
for (key, value) in results.iteritems():
print ' '*2, str(key)+':', value
class ThreadedDict:
"""
Takes a dictionary that maps threads to objects.
When a thread tries to get or set an attribute or item
of the threadeddict, it passes it on to the object
    for that thread in the dictionary.
"""
def __init__(self, dictionary):
self.__dict__['_ThreadedDict__d'] = dictionary
def __getattr__(self, attr):
return getattr(self.__d[currentThread()], attr)
def __getitem__(self, item):
return self.__d[currentThread()][item]
def __setattr__(self, attr, value):
if attr == '__doc__':
self.__dict__[attr] = value
else:
return setattr(self.__d[currentThread()], attr, value)
def __setitem__(self, item, value):
self.__d[currentThread()][item] = value
def __hash__(self):
return hash(self.__d[currentThread()])
threadeddict = ThreadedDict
## IP Utilities
def validipaddr(address):
"""returns True if `address` is a valid IPv4 address"""
try:
octets = address.split('.')
assert len(octets) == 4
for x in octets:
assert 0 <= int(x) <= 255
except (AssertionError, ValueError):
return False
return True
def validipport(port):
"""returns True if `port` is a valid IPv4 port"""
try:
assert 0 <= int(port) <= 65535
except (AssertionError, ValueError):
return False
return True
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
    """returns `(ip_address, port)` from a string `ip` like "0.0.0.0:8080"."""
addr = defaultaddr
port = defaultport
ip = ip.split(":", 1)
if len(ip) == 1:
if not ip[0]:
pass
elif validipaddr(ip[0]):
addr = ip[0]
elif validipport(ip[0]):
port = int(ip[0])
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
elif len(ip) == 2:
addr, port = ip
        if not (validipaddr(addr) and validipport(port)):
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
port = int(port)
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
return (addr, port)
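# Illustrative examples (assumed behavior, not from the original source):
#   >>> validip('')                 # nothing given: both defaults
#   ('0.0.0.0', 8080)
#   >>> validip('1234')             # a bare port
#   ('0.0.0.0', 1234)
#   >>> validip('192.168.0.1:80')   # address and port
#   ('192.168.0.1', 80)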
def validaddr(string_):
"""returns either (ip_address, port) or "/path/to/socket" from string_"""
if '/' in string_:
return string_
else:
return validip(string_)
## URL Utilities
def prefixurl(base=''):
"""
    Returns `base` with '../' appended once for every '/' in the current
    request path, i.e. a relative prefix back to the application root
    ('./' when the result would otherwise be empty).
"""
url = ctx.path.lstrip('/')
for i in xrange(url.count('/')):
base += '../'
if not base:
base = './'
return base
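# Illustrative example (assumption): with ctx.path == '/a/b/c',
# prefixurl() returns '../../'; with ctx.path == '/', it returns './'.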
def urlquote(x): return urllib.quote(websafe(x).encode('utf-8'))
## Formatting
try:
from markdown import markdown # http://webpy.org/markdown.py
except ImportError:
pass
r_url = re_compile('(?<!\()(http://(\S+))')
def safemarkdown(text):
"""
Converts text to HTML following the rules of Markdown, but blocking any
outside HTML input, so that only the things supported by Markdown
can be used. Also converts raw URLs to links.
(requires [markdown.py](http://webpy.org/markdown.py))
"""
if text:
        text = text.replace('<', '&lt;')
# TODO: automatically get page title?
text = r_url.sub(r'<\1>', text)
text = markdown(text)
return text
## Databases
class _ItplError(ValueError):
"""String Interpolation Error
from <http://lfw.org/python/Itpl.py>
(cf. below for license)
"""
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
def _interpolate(format):
"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
def matchorfail(text, pos):
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = format.find("$", pos)
if dollar < 0:
break
nextchar = format[dollar + 1]
if nextchar == "{":
chunks.append((0, format[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, format[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, format[pos:dollar]))
match, pos = matchorfail(format, dollar + 1)
while pos < len(format):
if format[pos] == "." and \
pos + 1 < len(format) and format[pos + 1] in namechars:
match, pos = matchorfail(format, pos + 1)
elif format[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, format[dollar + 1:pos]))
else:
chunks.append((0, format[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(format):
chunks.append((0, format[pos:]))
return chunks
def sqlors(left, lst):
"""
    `left` is a SQL clause like `tablename.arg = `
    and `lst` is a list of values. Returns a reparam-style
    pair featuring the SQL that ORs together the clause
    for each item in `lst`.
For example:
web.sqlors('foo =', [1,2,3])
would result in:
foo = 1 OR foo = 2 OR foo = 3
"""
if isinstance(lst, iters):
lst = list(lst)
ln = len(lst)
if ln == 0:
return ("2+2=5", [])
if ln == 1:
lst = lst[0]
if isinstance(lst, iters):
return '(' + left + \
(' OR ' + left).join([aparam() for param in lst]) + ")", lst
else:
return left + aparam(), [lst]
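# Illustrative example (assuming a connected driver with the 'qmark'
# paramstyle, so aparam() yields '?'):
#   >>> sqlors('foo = ', [1, 2, 3])
#   ('(foo = ? OR foo = ? OR foo = ?)', [1, 2, 3])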
class UnknownParamstyle(Exception):
"""raised for unsupported db paramstyles
    Currently supported: qmark, numeric, format, pyformat
"""
pass
def aparam():
"""Use in a SQL string to make a spot for a db value."""
style = ctx.db_module.paramstyle
if style == 'qmark':
return '?'
elif style == 'numeric':
return ':1'
elif style in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, style
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
    using values from the dictionary. Returns a 2-tuple containing
    a string with `aparam()`s in it and a list of the matching values.
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
"""
vals = []
result = []
for live, chunk in _interpolate(string_):
if live:
result.append(aparam())
vals.append(eval(chunk, dictionary))
else: result.append(chunk)
return ''.join(result), vals
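# Illustrative example (again assuming a 'qmark' paramstyle driver):
#   >>> reparam('name = $name', {'name': 'bob'})
#   ('name = ?', ['bob'])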
class UnknownDB(Exception):
"""raised for unsupported dbms"""
pass
def connect(dbn, **keywords):
"""
Connects to the specified database.
    `dbn` currently must be "postgres", "mysql", or "sqlite".
If DBUtils is installed, connection pooling will be used.
"""
if dbn == "postgres":
try:
import psycopg2 as db
except ImportError:
try:
import psycopg as db
except ImportError:
import pgdb as db
keywords['password'] = keywords['pw']
del keywords['pw']
keywords['database'] = keywords['db']
del keywords['db']
elif dbn == "mysql":
import MySQLdb as db
keywords['passwd'] = keywords['pw']
del keywords['pw']
db.paramstyle = 'pyformat' # it's both, like psycopg
elif dbn == "sqlite":
        try: ## try pysqlite2 (the newer bindings) first
from pysqlite2 import dbapi2 as db
db.paramstyle = 'qmark'
        except ImportError: ## fall back to the older sqlite module
import sqlite as db
keywords['database'] = keywords['db']
del keywords['db']
else:
raise UnknownDB, dbn
ctx.db_name = dbn
ctx.db_module = db
ctx.db_transaction = False
if _hasPooling:
if 'db' not in globals():
globals()['db'] = PooledDB(dbapi=db, **keywords)
ctx.db = globals()['db'].connection()
else:
ctx.db = db.connect(**keywords)
ctx.dbq_count = 0
if globals().get('db_printing'):
def db_execute(cur, sql_query, d=None):
"""executes an sql query"""
def sqlquote(obj):
"""converts `obj` to its proper SQL version"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
return repr(obj)
ctx.dbq_count += 1
try:
outq = sql_query % tuple(map(sqlquote, d))
except TypeError:
outq = sql_query
print >> debug, str(ctx.dbq_count)+':', outq
a = time.time()
out = cur.execute(sql_query, d)
b = time.time()
print >> debug, '(%s)' % round(b - a, 2)
return out
ctx.db_execute = db_execute
else:
ctx.db_execute = lambda cur, sql_query, d=None: \
cur.execute(sql_query, d)
return ctx.db
def transact():
"""Start a transaction."""
    # commit everything up to now, so we don't roll it back later
ctx.db.commit()
ctx.db_transaction = True
def commit():
"""Commits a transaction."""
ctx.db.commit()
ctx.db_transaction = False
def rollback():
"""Rolls back a transaction."""
ctx.db.rollback()
ctx.db_transaction = False
def query(sql_query, vars=None, processed=False):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
"""
if vars is None:
vars = {}
db_cursor = ctx.db.cursor()
if not processed:
sql_query, vars = reparam(sql_query, vars)
ctx.db_execute(db_cursor, sql_query, vars)
if db_cursor.description:
names = [x[0] for x in db_cursor.description]
def iterwrapper():
row = db_cursor.fetchone()
while row:
yield Storage(dict(zip(names, row)))
row = db_cursor.fetchone()
out = iterbetter(iterwrapper())
out.__len__ = lambda: int(db_cursor.rowcount)
out.list = lambda: [Storage(dict(zip(names, x))) \
for x in db_cursor.fetchall()]
else:
out = db_cursor.rowcount
if not ctx.db_transaction:
ctx.db.commit()
return out
def sqllist(lst):
"""
If a list, converts it to a comma-separated string.
Otherwise, returns the string.
"""
if isinstance(lst, str):
return lst
else: return ', '.join(lst)
def sqlwhere(dictionary):
"""
Converts a `dictionary` to an SQL WHERE clause in
`reparam` format. Thus,
{'cust_id': 2, 'order_id':3}
would result in the equivalent of:
'cust_id = 2 AND order_id = 3'
but properly quoted.
"""
return ' AND '.join([
'%s = %s' % (k, aparam()) for k in dictionary.keys()
]), dictionary.values()
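# Illustrative example ('qmark' paramstyle assumed; clause order follows
# the dictionary's iteration order and so may vary):
#   >>> sqlwhere({'cust_id': 2, 'order_id': 3})
#   ('cust_id = ? AND order_id = ?', [2, 3])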
def select(tables, vars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None):
"""
Selects `what` from `tables` with clauses `where`, `order`,
    `group`, `limit`, and `offset`. Uses `vars` to interpolate.
Otherwise, each clause can take a reparam-style list.
"""
if vars is None:
vars = {}
values = []
qout = ""
for (sql, val) in (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset)):
if isinstance(val, (int, long)):
if sql == 'WHERE':
nquery, nvalue = 'id = '+aparam(), [val]
else:
nquery, nvalue = str(val), ()
elif isinstance(val, (list, tuple)) and len(val) == 2:
nquery, nvalue = val
elif val:
nquery, nvalue = reparam(val, vars)
else:
continue
qout += " " + sql + " " + nquery
values.extend(nvalue)
return query(qout, values, processed=True)
def insert(tablename, seqname=None, **values):
"""
    Inserts `values` into `tablename`. Returns the current sequence ID.
    Set `seqname` to the sequence name if it isn't the default
    (`tablename + "_id_seq"`), or to `False` if there isn't one.
"""
db_cursor = ctx.db.cursor()
if values:
sql_query, v = "INSERT INTO %s (%s) VALUES (%s)" % (
tablename,
", ".join(values.keys()),
', '.join([aparam() for x in values])
), values.values()
else:
sql_query, v = "INSERT INTO %s DEFAULT VALUES" % tablename, None
if seqname is False:
pass
elif ctx.db_name == "postgres":
if seqname is None:
seqname = tablename + "_id_seq"
sql_query += "; SELECT currval('%s')" % seqname
elif ctx.db_name == "mysql":
ctx.db_execute(db_cursor, sql_query, v)
sql_query = "SELECT last_insert_id()"
v = ()
elif ctx.db_name == "sqlite":
ctx.db_execute(db_cursor, sql_query, v)
# not really the same...
sql_query = "SELECT last_insert_rowid()"
v = ()
ctx.db_execute(db_cursor, sql_query, v)
try:
out = db_cursor.fetchone()[0]
except Exception:
out = None
if not ctx.db_transaction:
ctx.db.commit()
return out
def update(tables, where, vars=None, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
"""
if vars is None:
vars = {}
if isinstance(where, (int, long)):
vars = [where]
where = "id = " + aparam()
elif isinstance(where, (list, tuple)) and len(where) == 2:
where, vars = where
else:
where, vars = reparam(where, vars)
db_cursor = ctx.db.cursor()
ctx.db_execute(db_cursor, "UPDATE %s SET %s WHERE %s" % (
sqllist(tables),
', '.join([k + '=' + aparam() for k in values.keys()]),
where),
values.values() + vars)
if not ctx.db_transaction:
ctx.db.commit()
return db_cursor.rowcount
def delete(table, where, using=None, vars=None):
"""
Deletes from `table` with clauses `where` and `using`.
"""
if vars is None:
vars = {}
db_cursor = ctx.db.cursor()
if isinstance(where, (int, long)):
vars = [where]
where = "id = " + aparam()
elif isinstance(where, (list, tuple)) and len(where) == 2:
where, vars = where
else:
where, vars = reparam(where, vars)
q = 'DELETE FROM %s WHERE %s' % (table, where)
if using:
q += ' USING ' + sqllist(using)
ctx.db_execute(db_cursor, q, vars)
if not ctx.db_transaction:
ctx.db.commit()
return db_cursor.rowcount
## Request Handlers
def handle(mapping, fvars=None):
"""
Call the appropriate function based on the url to function mapping in `mapping`.
    If no module for the function is specified, look up the function in `fvars`. If
    `fvars` is empty, use the caller's context.
`mapping` should be a tuple of paired regular expressions with function name
substitutions. `handle` will import modules as necessary.
"""
for url, ofno in group(mapping, 2):
if isinstance(ofno, tuple):
ofn, fna = ofno[0], list(ofno[1:])
else:
ofn, fna = ofno, []
fn, result = re_subm('^' + url + '$', ofn, ctx.path)
if result: # it's a match
if fn.split(' ', 1)[0] == "redirect":
url = fn.split(' ', 1)[1]
if ctx.method == "GET":
x = ctx.env.get('QUERY_STRING', '')
if x:
url += '?' + x
return redirect(url)
elif '.' in fn:
x = fn.split('.')
mod, cls = '.'.join(x[:-1]), x[-1]
mod = __import__(mod, globals(), locals(), [""])
cls = getattr(mod, cls)
else:
cls = fn
mod = fvars or upvars()
if isinstance(mod, types.ModuleType):
mod = vars(mod)
try:
cls = mod[cls]
except KeyError:
return notfound()
meth = ctx.method
if meth == "HEAD":
if not hasattr(cls, meth):
meth = "GET"
if not hasattr(cls, meth):
return nomethod(cls)
tocall = getattr(cls(), meth)
args = list(result.groups())
for d in re.findall(r'\\(\d+)', ofn):
args.pop(int(d) - 1)
return tocall(*([urllib.unquote(x) for x in args] + fna))
return notfound()
def autodelegate(prefix=''):
"""
Returns a method that takes one argument and calls the method named prefix+arg,
calling `notfound()` if there isn't one. Example:
urls = ('/prefs/(.*)', 'prefs')
class prefs:
GET = autodelegate('GET_')
def GET_password(self): pass
def GET_privacy(self): pass
    `GET_password` would get called for `/prefs/password` while `GET_privacy`
    would get called for `/prefs/privacy`.
If a user visits `/prefs/password/change` then `GET_password(self, '/change')`
is called.
"""
def internal(self, arg):
if '/' in arg:
first, rest = arg.split('/', 1)
func = prefix + first
args = ['/' + rest]
else:
func = prefix + arg
args = []
if hasattr(self, func):
try:
return getattr(self, func)(*args)
except TypeError:
return notfound()
else:
return notfound()
return internal
def background(func):
"""A function decorator to run a long-running function as a background thread."""
def internal(*a, **kw):
data() # cache it
ctx = _context[currentThread()]
_context[currentThread()] = storage(ctx.copy())
def newfunc():
_context[currentThread()] = ctx
func(*a, **kw)
t = threading.Thread(target=newfunc)
background.threaddb[id(t)] = t
t.start()
ctx.headers = []
return seeother(changequery(_t=id(t)))
return internal
background.threaddb = {}
def backgrounder(func):
def internal(*a, **kw):
i = input(_method='get')
if '_t' in i:
try:
t = background.threaddb[int(i._t)]
except KeyError:
return notfound()
_context[currentThread()] = _context[t]
return
else:
return func(*a, **kw)
return internal
## HTTP Functions
def httpdate(date_obj):
"""Formats a datetime object for use in HTTP headers."""
return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
def parsehttpdate(string_):
"""Parses an HTTP date into a datetime object."""
try:
t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
return None
return datetime.datetime(*t[:6])
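# Illustrative round-trip (assumed behavior):
#   >>> parsehttpdate('Sun, 06 Nov 1994 08:49:37 GMT')
#   datetime.datetime(1994, 11, 6, 8, 49, 37)
# httpdate() formats such a datetime back into the header form.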
def expires(delta):
"""
Outputs an `Expires` header for `delta` from now.
`delta` is a `timedelta` object or a number of seconds.
"""
try:
datetime
except NameError:
raise Exception, "requires Python 2.3 or later"
if isinstance(delta, (int, long)):
delta = datetime.timedelta(seconds=delta)
date_obj = datetime.datetime.utcnow() + delta
header('Expires', httpdate(date_obj))
def lastmodified(date_obj):
"""Outputs a `Last-Modified` header for `datetime`."""
header('Last-Modified', httpdate(date_obj))
def modified(date=None, etag=None):
n = ctx.env.get('HTTP_IF_NONE_MATCH')
m = parsehttpdate(ctx.env.get('HTTP_IF_MODIFIED_SINCE', '').split(';')[0])
validate = False
if etag:
raise NotImplementedError, "no etag support yet"
# should really be a warning
if date and m:
# we subtract a second because
# HTTP dates don't have sub-second precision
if date-datetime.timedelta(seconds=1) <= m:
validate = True
if validate: ctx.status = '304 Not Modified'
return not validate
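# Illustrative usage sketch (assumption; `regenerate_page` is hypothetical):
#   if modified(date=last_change):
#       regenerate_page()
#   # else ctx.status has been set to '304 Not Modified'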
"""
By default, the error handlers below return simple pages with very short
messages (like "bad request") for the user. They can and should be overridden
to return nicer ones.
"""
def redirect(url, status='301 Moved Permanently'):
"""
Returns a `status` redirect to the new URL.
`url` is joined with the base URL so that things like
    `redirect("about")` will work properly.
"""
newloc = urlparse.urljoin(ctx.home + ctx.path, url)
ctx.status = status
ctx.output = ''
header('Content-Type', 'text/html')
header('Location', newloc)
# seems to add a three-second delay for some reason:
# output('<a href="'+ newloc + '">moved permanently</a>')
def found(url):
"""A `302 Found` redirect."""
return redirect(url, '302 Found')
def seeother(url):
"""A `303 See Other` redirect."""
return redirect(url, '303 See Other')
def tempredirect(url):
"""A `307 Temporary Redirect` redirect."""
return redirect(url, '307 Temporary Redirect')
def badrequest():
"""Return a `400 Bad Request` error."""
ctx.status = '400 Bad Request'
header('Content-Type', 'text/html')
return output('bad request')
def notfound():
"""Returns a `404 Not Found` error."""
ctx.status = '404 Not Found'
header('Content-Type', 'text/html')
return output('not found')
def nomethod(cls):
"""Returns a `405 Method Not Allowed` error for `cls`."""
ctx.status = '405 Method Not Allowed'
header('Content-Type', 'text/html')
header('Allow', \
', '.join([method for method in \
['GET', 'HEAD', 'POST', 'PUT', 'DELETE'] \
if hasattr(cls, method)]))
# commented out for the same reason redirect is
# return output('method not allowed')
def gone():
"""Returns a `410 Gone` error."""
ctx.status = '410 Gone'
header('Content-Type', 'text/html')
return output("gone")
def internalerror():
    """Returns a `500 Internal Server Error`."""
ctx.status = "500 Internal Server Error"
ctx.headers = [('Content-Type', 'text/html')]
ctx.output = "internal server error"
# adapted from Django <djangoproject.com>
# Copyright (c) 2005, the Lawrence Journal-World
# Used under the modified BSD license:
# http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
DJANGO_500_PAGE = """#import inspect
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>$exception_type at $ctx.path</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table {
border:1px solid #ccc; border-collapse: collapse; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%;}
table td.code div { overflow:hidden; }
table.source th { color:#666; }
table.source td {
font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; }
ul.traceback li.frame { margin-bottom:1em; }
div.context { margin: 10px 0; }
div.context ol {
padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li {
font-family:monospace; white-space:pre; color:#666; cursor:pointer; }
div.context ol.context-line li { color:black; background-color:#ccc; }
div.context ol.context-line li span { float: right; }
div.commands { margin-left: 40px; }
div.commands a { color:black; text-decoration:none; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
</style>
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon;
// Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
//-->
</script>
</head>
<body>
<div id="summary">
<h1>$exception_type at $ctx.path</h1>
<h2>$exception_value</h2>
<table><tr>
<th>Python</th>
<td>$lastframe.filename in $lastframe.function, line $lastframe.lineno</td>
</tr><tr>
<th>Web</th>
<td>$ctx.method $ctx.home$ctx.path</td>
</tr></table>
</div>
<div id="traceback">
<h2>Traceback <span>(innermost first)</span></h2>
<ul class="traceback">
#for frame in $frames
<li class="frame">
<code>$frame.filename</code> in <code>$frame.function</code>
#if $frame.context_line
<div class="context" id="c$frame.id">
#if $frame.pre_context
<ol start="$frame.pre_context_lineno" class="pre-context" id="pre$frame.id">#for line in $frame.pre_context#<li onclick="toggle('pre$frame.id', 'post$frame.id')">$line</li>#end for#</ol>
#end if
<ol start="$frame.lineno" class="context-line"><li onclick="toggle('pre$frame.id', 'post$frame.id')">$frame.context_line <span>...</span></li></ol>
#if $frame.post_context
<ol start='$(frame.lineno+1)' class="post-context" id="post$frame.id">#for line in $frame.post_context#<li onclick="toggle('pre$frame.id', 'post$frame.id')">$line</li>#end for#</ol>
#end if
</div>
#end if
#if $frame.vars
<div class="commands">
<a href='#' onclick="return varToggle(this, '$frame.id')"><span>▶</span> Local vars</a>## $inspect.formatargvalues(*inspect.getargvalues(frame['tb'].tb_frame))
</div>
<table class="vars" id="v$frame.id">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
#set frameitems = $frame.vars
#silent frameitems.sort(lambda x,y: cmp(x[0], y[0]))
#for (key, val) in frameitems
<tr>
<td>$key</td>
<td class="code"><div>$prettify(val)</div></td>
</tr>
#end for
</tbody>
</table>
#end if
</li>
#end for
</ul>
</div>
<div id="requestinfo">
#if $context_.output or $context_.headers
<h2>Response so far</h2>
<h3>HEADERS</h3>
#if $ctx.headers
<p class="req"><code>
#for (k, v) in $context_.headers
$k: $v<br />
#end for
</code></p>
#else
<p>No headers.</p>
#end if
<h3>BODY</h3>
<p class="req" style="padding-bottom: 2em"><code>
$context_.output
</code></p>
#end if
<h2>Request information</h2>
<h3>INPUT</h3>
#if $input_
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
#set myitems = $input_.items()
#silent myitems.sort(lambda x,y: cmp(x[0], y[0]))
#for (key, val) in myitems
<tr>
<td>$key</td>
<td class="code"><div>$val</div></td>
</tr>
#end for
</tbody>
</table>
#else
<p>No input data.</p>
#end if
<h3 id="cookie-info">COOKIES</h3>
#if $cookies_
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
#for (key, val) in $cookies_.items()
<tr>
<td>$key</td>
<td class="code"><div>$val</div></td>
</tr>
#end for
</tbody>
</table>
#else
<p>No cookie data</p>
#end if
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
#set myitems = $context_.items()
#silent myitems.sort(lambda x,y: cmp(x[0], y[0]))
#for (key, val) in $myitems
#if not $key.startswith('_') and $key not in ['env', 'output', 'headers', 'environ', 'status', 'db_execute']
<tr>
<td>$key</td>
<td class="code"><div>$prettify($val)</div></td>
</tr>
#end if
#end for
</tbody>
</table>
<h3 id="meta-info">ENVIRONMENT</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
#set myitems = $context_.env.items()
#silent myitems.sort(lambda x,y: cmp(x[0], y[0]))
#for (key, val) in $myitems
<tr>
<td>$key</td>
<td class="code"><div>$prettify($val)</div></td>
</tr>
#end for
</tbody>
</table>
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>web.internalerror</code>
set to <code>web.debugerror</code>. Change that if you want a different one.
</p>
</div>
</body>
</html>"""
def djangoerror():
def _get_lines_from_file(filename, lineno, context_lines):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
try:
source = open(filename).readlines()
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = \
[line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = \
[line.strip('\n') for line in source[lineno + 1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
except (OSError, IOError):
return None, [], None, []
exception_type, exception_value, tback = sys.exc_info()
frames = []
while tback is not None:
filename = tback.tb_frame.f_code.co_filename
function = tback.tb_frame.f_code.co_name
lineno = tback.tb_lineno - 1
pre_context_lineno, pre_context, context_line, post_context = \
_get_lines_from_file(filename, lineno, 7)
frames.append({
'tback': tback,
'filename': filename,
'function': function,
'lineno': lineno,
'vars': tback.tb_frame.f_locals.items(),
'id': id(tback),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno,
})
tback = tback.tb_next
lastframe = frames[-1]
frames.reverse()
urljoin = urlparse.urljoin
input_ = input()
cookies_ = cookies()
context_ = ctx
def prettify(x):
try:
out = pprint.pformat(x)
except Exception, e:
out = '[could not display: <' + e.__class__.__name__ + \
': '+str(e)+'>]'
return out
return render(DJANGO_500_PAGE, asTemplate=True, isString=True)
def debugerror():
"""
A replacement for `internalerror` that presents a nice page with lots
of debug information for the programmer.
(Based on the beautiful 500 page from [Django](http://djangoproject.com/),
designed by [Wilson Miner](http://wilsonminer.com/).)
Requires [Cheetah](http://cheetahtemplate.org/).
"""
# need to do django first, so it can get the old stuff
if _hasTemplating:
out = str(djangoerror())
else:
# Cheetah isn't installed
out = """<p>You've set web.py to use the fancier debugerror error
messages, but these messages require you install the Cheetah template
system. For more information, see
<a href="http://webpy.org/">the web.py website</a>.</p>
<p>In the meantime, here's a plain old error message:</p>
<pre>%s</pre>
<p>(If it says something about 'Compiler', then it's probably
because you're trying to use templates and you haven't
installed Cheetah. See above.)</p>
""" % htmlquote(traceback.format_exc())
ctx.status = "500 Internal Server Error"
ctx.headers = [('Content-Type', 'text/html')]
ctx.output = out
## Rendering
r_include = re_compile(r'(?!\\)#include \"(.*?)\"($|#)', re.M)
def __compiletemplate(template, base=None, isString=False):
if isString:
text = template
else:
text = open('templates/'+template).read()
# implement #include at compile-time
def do_include(match):
text = open('templates/'+match.groups()[0]).read()
return text
while r_include.findall(text):
text = r_include.sub(do_include, text)
execspace = _compiletemplate.bases.copy()
tmpl_compiler = Compiler(source=text, mainClassName='GenTemplate')
tmpl_compiler.addImportedVarNames(execspace.keys())
exec str(tmpl_compiler) in execspace
if base:
_compiletemplate.bases[base] = execspace['GenTemplate']
return execspace['GenTemplate']
_compiletemplate = memoize(__compiletemplate)
_compiletemplate.bases = {}
def htmlquote(text):
"""Encodes `text` for raw use in HTML."""
    text = text.replace("&", "&amp;") # Must be done first!
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    text = text.replace("'", "&#39;")
    text = text.replace('"', "&quot;")
return text
def websafe(val):
"""
Converts `val` so that it's safe for use in HTML.
HTML metacharacters are encoded,
None becomes the empty string, and
unicode is converted to UTF-8.
"""
if val is None: return ''
if not isinstance(val, unicode): val = str(val)
return htmlquote(val)
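# Illustrative examples (assumed behavior):
#   >>> websafe(None)
#   ''
#   >>> websafe('<b>')
#   '&lt;b&gt;'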
if _hasTemplating:
class WebSafe(Filter):
def filter(self, val, **keywords):
return websafe(val)
def render(template, terms=None, asTemplate=False, base=None,
isString=False):
"""
Renders a template, caching where it can.
    `template` is the name of a file containing the template in
    the `templates/` folder, unless `isString`, in which case it's the
    template itself.
    `terms` is a dictionary used to fill the template. If it's None, the
    caller's local variables are used instead, with `context` and `ctx`
    added if they aren't already set.
    If `asTemplate` is False, it `output`s the template directly. Otherwise,
    it returns the template object.
    If the template is a potential base template (that is, something other
    templates can extend), then `base` should be a string with the name of
    the template. It will be cached and made available for future calls
    to `render`.
Requires [Cheetah](http://cheetahtemplate.org/).
"""
# terms=['var1', 'var2'] means grab those variables
if isinstance(terms, list):
new = {}
old = upvars()
for k in terms:
new[k] = old[k]
terms = new
# default: grab all locals
elif terms is None:
terms = {'context': context, 'ctx':ctx}
terms.update(sys._getframe(1).f_locals)
# terms=d means use d as the searchList
if not isinstance(terms, tuple):
terms = (terms,)
if not isString and template.endswith('.html'):
header('Content-Type','text/html; charset=utf-8', unique=True)
compiled_tmpl = _compiletemplate(template, base=base, isString=isString)
compiled_tmpl = compiled_tmpl(searchList=terms, filter=WebSafe)
if asTemplate:
return compiled_tmpl
else:
return output(str(compiled_tmpl))
## Input Forms
def input(*requireds, **defaults):
"""
Returns a `storage` object with the GET and POST arguments.
See `storify` for how `requireds` and `defaults` work.
"""
from cStringIO import StringIO
def dictify(fs): return dict([(k, fs[k]) for k in fs.keys()])
_method = defaults.pop('_method', 'both')
e = ctx.env.copy()
out = {}
if _method.lower() in ['both', 'post']:
a = {}
if e['REQUEST_METHOD'] == 'POST':
a = cgi.FieldStorage(fp = StringIO(data()), environ=e,
keep_blank_values=1)
a = dictify(a)
out = dictadd(out, a)
if _method.lower() in ['both', 'get']:
e['REQUEST_METHOD'] = 'GET'
a = dictify(cgi.FieldStorage(environ=e, keep_blank_values=1))
out = dictadd(out, a)
try:
return storify(out, *requireds, **defaults)
except KeyError:
badrequest()
raise StopIteration
def data():
"""Returns the data sent with the request."""
if 'data' not in ctx:
cl = intget(ctx.env.get('CONTENT_LENGTH'), 0)
ctx.data = ctx.env['wsgi.input'].read(cl)
return ctx.data
def changequery(**kw):
"""
Imagine you're at `/foo?a=1&b=2`. Then `changequery(a=3)` will return
`/foo?a=3&b=2` -- the same URL but with the arguments you requested
changed.
"""
query = input(_method='get')
for k, v in kw.iteritems():
if v is None:
query.pop(k, None)
else:
query[k] = v
out = ctx.path
if query:
out += '?' + urllib.urlencode(query)
return out
## Cookies
def setcookie(name, value, expires="", domain=None):
"""Sets a cookie."""
if expires < 0:
expires = -1000000000
kargs = {'expires': expires, 'path':'/'}
if domain:
kargs['domain'] = domain
# @@ should we limit cookies to a different path?
cookie = Cookie.SimpleCookie()
cookie[name] = value
for key, val in kargs.iteritems():
cookie[name][key] = val
header('Set-Cookie', cookie.items()[0][1].OutputString())
def cookies(*requireds, **defaults):
"""
Returns a `storage` object with all the cookies in it.
See `storify` for how `requireds` and `defaults` work.
"""
cookie = Cookie.SimpleCookie()
cookie.load(ctx.env.get('HTTP_COOKIE', ''))
try:
return storify(cookie, *requireds, **defaults)
except KeyError:
badrequest()
raise StopIteration
## WSGI Sugar
def header(hdr, value, unique=False):
"""
    Adds the header `hdr: value` to the response.
If `unique` is True and a header with that name already exists,
it doesn't add a new one. If `unique` is None and a header with
that name already exists, it replaces it with this one.
"""
if unique is True:
for h, v in ctx.headers:
if h == hdr: return
elif unique is None:
ctx.headers = [h for h in ctx.headers if h[0] != hdr]
ctx.headers.append((hdr, value))
def output(string_):
"""Appends `string_` to the response."""
if isinstance(string_, unicode): string_ = string_.encode('utf8')
if ctx.get('flush'):
ctx._write(string_)
else:
ctx.output += str(string_)
def flush():
ctx.flush = True
return flush
def write(cgi_response):
"""
Converts a standard CGI-style string response into `header` and
`output` calls.
"""
cgi_response = str(cgi_response)
    cgi_response = cgi_response.replace('\r\n', '\n')
head, body = cgi_response.split('\n\n', 1)
lines = head.split('\n')
for line in lines:
if line.isspace():
continue
hdr, value = line.split(":", 1)
value = value.strip()
if hdr.lower() == "status":
ctx.status = value
else:
header(hdr, value)
output(body)
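# Illustrative example (assumption): a CGI-style string splits into headers
# and body, so
#   write('Status: 404 Not Found\nContent-Type: text/plain\n\nmissing')
# sets ctx.status to '404 Not Found', adds the Content-Type header, and
# outputs 'missing'.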
def webpyfunc(inp, fvars=None, autoreload=False):
"""If `inp` is a url mapping, returns a function that calls handle."""
if not fvars:
fvars = upvars()
if not hasattr(inp, '__call__'):
if autoreload:
# black magic to make autoreload work:
mod = \
__import__(
fvars['__file__'].split(os.path.sep).pop().split('.')[0])
#@@probably should replace this with some inspect magic
name = dictfind(fvars, inp)
func = lambda: handle(getattr(mod, name), mod)
else:
func = lambda: handle(inp, fvars)
else:
func = inp
return func
def wsgifunc(func, *middleware):
"""Returns a WSGI-compatible function from a webpy-function."""
middleware = list(middleware)
if reloader in middleware:
relr = reloader(None)
relrcheck = relr.check
middleware.remove(reloader)
else:
relr = None
relrcheck = lambda: None
def wsgifunc(env, start_resp):
_load(env)
relrcheck()
try:
result = func()
except StopIteration:
result = None
is_generator = result and hasattr(result, 'next')
if is_generator:
# wsgi requires the headers first
# so we need to do an iteration
# and save the result for later
try:
firstchunk = result.next()
except StopIteration:
firstchunk = ''
status, headers, output = ctx.status, ctx.headers, ctx.output
ctx._write = start_resp(status, headers)
# and now, the fun:
def cleanup():
# we insert this little generator
# at the end of our itertools.chain
# so that it unloads the request
# when everything else is done
yield '' # force it to be a generator
_unload()
# result is the output of calling the webpy function
# it could be a generator...
if is_generator:
if firstchunk is flush:
# oh, it's just our special flush mode
# ctx._write is set up, so just continue execution
try:
result.next()
except StopIteration:
pass
_unload()
return []
else:
return itertools.chain([firstchunk], result, cleanup())
# ... but it's usually just None
#
# output is the stuff in ctx.output
# it's usually a string...
if isinstance(output, str): #@@ other stringlikes?
_unload()
return [output]
# it could be a generator...
elif hasattr(output, 'next'):
return itertools.chain(output, cleanup())
else:
_unload()
raise Exception, "Invalid web.ctx.output"
for mw_func in middleware:
wsgifunc = mw_func(wsgifunc)
if relr:
relr.func = wsgifunc
return wsgifunc
return wsgifunc
def run(inp, *middleware):
"""
Starts handling requests. If called in a CGI or FastCGI context, it will follow
that protocol. If called from the command line, it will start an HTTP
server on the port named in the first command line argument, or, if there
is no argument, on port 8080.
    If `inp` is a callable, then it's called with no arguments.
    Otherwise, it's a `mapping` object to be passed to `handle(...)`.
    **Caveat:** So that `reloader` will work correctly, `inp` has to be a variable;
    it can't be a tuple passed in directly.
`middleware` is a list of WSGI middleware which is applied to the resulting WSGI
function.
"""
autoreload = reloader in middleware
fvars = upvars()
return runwsgi(wsgifunc(webpyfunc(inp, fvars, autoreload), *middleware))
def runwsgi(func):
"""
Runs a WSGI-compatible function using FCGI, SCGI, or a simple web server,
as appropriate.
"""
#@@ improve detection
if os.environ.has_key('SERVER_SOFTWARE'): # cgi
os.environ['FCGI_FORCE_CGI'] = 'Y'
if (os.environ.has_key('PHP_FCGI_CHILDREN') #lighttpd fastcgi
or os.environ.has_key('SERVER_SOFTWARE')
or 'fcgi' in sys.argv or 'fastcgi' in sys.argv):
return runfcgi(func)
if 'scgi' in sys.argv:
return runscgi(func)
# command line:
return runsimple(func, validip(listget(sys.argv, 1, '')))
def runsimple(func, server_address=("0.0.0.0", 8080)):
"""
Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
is hosted statically.
Based on [WsgiServer][ws] from [Colin Stewart][cs].
[ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
[cs]: http://www.owlfish.com/
"""
# Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
# Modified somewhat for simplicity
# Used under the modified BSD license:
# http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
import SimpleHTTPServer, SocketServer, BaseHTTPServer, urlparse
import socket, errno
import traceback
class WSGIHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def run_wsgi_app(self):
protocol, host, path, parameters, query, fragment = \
urlparse.urlparse('http://dummyhost%s' % self.path)
# we only use path, query
env = {'wsgi.version': (1, 0)
,'wsgi.url_scheme': 'http'
,'wsgi.input': self.rfile
,'wsgi.errors': sys.stderr
,'wsgi.multithread': 1
,'wsgi.multiprocess': 0
,'wsgi.run_once': 0
,'REQUEST_METHOD': self.command
,'REQUEST_URI': self.path
,'PATH_INFO': path
,'QUERY_STRING': query
,'CONTENT_TYPE': self.headers.get('Content-Type', '')
,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
,'REMOTE_ADDR': self.client_address[0]
,'SERVER_NAME': self.server.server_address[0]
,'SERVER_PORT': str(self.server.server_address[1])
,'SERVER_PROTOCOL': self.request_version
}
for http_header, http_value in self.headers.items():
env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
http_value
# Setup the state
self.wsgi_sent_headers = 0
self.wsgi_headers = []
try:
                # We have the environment; now invoke the application
result = self.server.app(env, self.wsgi_start_response)
try:
try:
for data in result:
if data:
self.wsgi_write_data(data)
finally:
if hasattr(result, 'close'):
result.close()
except socket.error, socket_err:
# Catch common network errors and suppress them
if (socket_err.args[0] in \
(errno.ECONNABORTED, errno.EPIPE)):
return
except socket.timeout, socket_timeout:
return
except:
print >> debug, traceback.format_exc(),
internalerror()
if not self.wsgi_sent_headers:
self.wsgi_start_response(ctx.status, ctx.headers)
self.wsgi_write_data(ctx.output)
if (not self.wsgi_sent_headers):
# We must write out something!
self.wsgi_write_data(" ")
return
do_POST = run_wsgi_app
do_PUT = run_wsgi_app
do_DELETE = run_wsgi_app
def do_GET(self):
if self.path.startswith('/static/'):
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
else:
self.run_wsgi_app()
def wsgi_start_response(self, response_status, response_headers,
exc_info=None):
if (self.wsgi_sent_headers):
raise Exception \
("Headers already sent and start_response called again!")
# Should really take a copy to avoid changes in the application....
self.wsgi_headers = (response_status, response_headers)
return self.wsgi_write_data
def wsgi_write_data(self, data):
if (not self.wsgi_sent_headers):
status, headers = self.wsgi_headers
# Need to send header prior to data
                status_code = status[:status.find(' ')]
                status_msg = status[status.find(' ') + 1:]
self.send_response(int(status_code), status_msg)
for header, value in headers:
self.send_header(header, value)
self.end_headers()
self.wsgi_sent_headers = 1
# Send the data
self.wfile.write(data)
class WSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
def __init__(self, func, server_address):
BaseHTTPServer.HTTPServer.__init__(self,
server_address,
WSGIHandler)
self.app = func
self.serverShuttingDown = 0
print "Launching server: http://%s:%d/" % server_address
WSGIServer(func, server_address).serve_forever()
def makeserver(wsgi_server):
"""Updates a flup-style WSGIServer with web.py-style error support."""
class MyServer(wsgi_server):
def error(self, req):
w = req.stdout.write
internalerror()
w('Status: ' + ctx.status + '\r\n')
for (h, v) in ctx.headers:
w(h + ': ' + v + '\r\n')
w('\r\n' + ctx.output)
return MyServer
def runfcgi(func):
"""Runs a WSGI-function with a FastCGI server."""
from flup.server.fcgi import WSGIServer
    if len(sys.argv) > 2: # progname, fcgi/fastcgi, host:port
args = sys.argv[:]
if 'fastcgi' in args: args.remove('fastcgi')
elif 'fcgi' in args: args.remove('fcgi')
hostport = validaddr(args[1])
elif len(sys.argv) > 1:
hostport = ('localhost', 8000)
else:
hostport = None
return makeserver(WSGIServer)(func, multiplexed=True, bindAddress=hostport).run()
def runscgi(func):
"""Runs a WSGI-function with an SCGI server."""
from flup.server.scgi import WSGIServer
my_server = makeserver(WSGIServer)
if len(sys.argv) > 2: # progname, scgi
args = sys.argv[:]
args.remove('scgi')
hostport = validaddr(args[1])
else:
hostport = ('localhost', 4000)
return my_server(func, bindAddress=hostport).run()
## Debugging
def debug(*args):
"""
Prints a prettyprinted version of `args` to stderr.
"""
try:
out = ctx.environ['wsgi.errors']
except:
out = sys.stderr
for arg in args:
print >> out, pprint.pformat(arg)
return ''
def debugwrite(x):
"""writes debug data to error stream"""
try:
out = ctx.environ['wsgi.errors']
except:
out = sys.stderr
out.write(x)
debug.write = debugwrite
class Reloader:
"""
Before every request, checks to see if any loaded modules have changed on
disk and, if so, reloads them.
"""
def __init__(self, func):
self.func = func
self.mtimes = {}
global _compiletemplate
b = _compiletemplate.bases
_compiletemplate = globals()['__compiletemplate']
_compiletemplate.bases = b
def check(self):
for mod in sys.modules.values():
try:
mtime = os.stat(mod.__file__).st_mtime
except (AttributeError, OSError, IOError):
continue
if mod.__file__.endswith('.pyc') and \
os.path.exists(mod.__file__[:-1]):
mtime = max(os.stat(mod.__file__[:-1]).st_mtime, mtime)
if mod not in self.mtimes:
self.mtimes[mod] = mtime
elif self.mtimes[mod] < mtime:
try:
reload(mod)
except ImportError:
pass
return True
def __call__(self, e, o):
self.check()
return self.func(e, o)
reloader = Reloader
def profiler(app):
"""Outputs basic profiling information at the bottom of each response."""
def profile_internal(e, o):
out, result = profile(app)(e, o)
return out + ['<pre>' + result + '</pre>'] #@@encode
return profile_internal
## Context
class _outputter:
"""Wraps `sys.stdout` so that print statements go into the response."""
def write(self, string_):
if hasattr(ctx, 'output'):
return output(string_)
else:
_oldstdout.write(string_)
def flush(self):
return _oldstdout.flush()
def close(self):
return _oldstdout.close()
_context = {currentThread():Storage()}
ctx = context = threadeddict(_context)
ctx.__doc__ = """
A `storage` object containing various information about the request:
`environ` (aka `env`)
: A dictionary containing the standard WSGI environment variables.
`host`
: The domain (`Host` header) requested by the user.
`home`
: The base path for the application.
`ip`
: The IP address of the requester.
`method`
: The HTTP method used.
`path`
: The path requested.
`fullpath`
: The full path requested, including query arguments.
### Response Data
`status` (default: "200 OK")
: The status code to be used in the response.
`headers`
: A list of 2-tuples to be used in the response.
`output`
: A string to be used as the response.
"""
if '_oldstdout' not in globals():
_oldstdout = sys.stdout
sys.stdout = _outputter()
loadhooks = {}
def load():
"""
Loads a new context for the thread.
You can ask for a function to be run at loadtime by
adding it to the dictionary `loadhooks`.
"""
_context[currentThread()] = Storage()
ctx.status = '200 OK'
ctx.headers = []
if 'db_parameters' in globals():
connect(**db_parameters)
for x in loadhooks.values(): x()
def _load(env):
load()
ctx.output = ''
ctx.environ = ctx.env = env
ctx.host = env.get('HTTP_HOST')
ctx.home = 'http://' + env.get('HTTP_HOST', '[unknown]') + \
os.environ.get('REAL_SCRIPT_NAME', env.get('SCRIPT_NAME', ''))
ctx.ip = env.get('REMOTE_ADDR')
ctx.method = env.get('REQUEST_METHOD')
ctx.path = env.get('PATH_INFO')
# http://trac.lighttpd.net/trac/ticket/406 requires:
if env.get('SERVER_SOFTWARE', '').startswith('lighttpd/'):
ctx.path = lstrips(env.get('REQUEST_URI').split('?')[0],
os.environ.get('REAL_SCRIPT_NAME', env.get('SCRIPT_NAME', '')))
ctx.fullpath = ctx.path
if env.get('QUERY_STRING'):
ctx.fullpath += '?' + env.get('QUERY_STRING', '')
unloadhooks = {}
def unload():
"""
Unloads the context for the thread.
    You can ask for a function to be run at unload time by
    adding it to the dictionary `unloadhooks`.
"""
for x in unloadhooks.values(): x()
# ensures db cursors and such are GCed promptly
del _context[currentThread()]
def _unload():
unload()
if __name__ == "__main__":
import doctest
doctest.testmod()
urls = ('/web.py', 'source')
class source:
def GET(self):
header('Content-Type', 'text/python')
print open(sys.argv[0]).read()
run(urls)
|
stonegithubs/micropython
|
refs/heads/master
|
tests/basics/set_copy.py
|
118
|
s = {1, 2, 3, 4}
t = s.copy()
s.add(5)
t.add(7)
for i in s, t:
print(sorted(i))
|
ifduyue/django
|
refs/heads/master
|
django/contrib/postgres/functions.py
|
98
|
from django.db.models import DateTimeField, Func, UUIDField
class RandomUUID(Func):
template = 'GEN_RANDOM_UUID()'
output_field = UUIDField()
class TransactionNow(Func):
template = 'CURRENT_TIMESTAMP'
output_field = DateTimeField()
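# Illustrative usage sketch (assumption; `Event` is a hypothetical model and
# RandomUUID() needs the pgcrypto extension providing GEN_RANDOM_UUID()):
#   Event.objects.create(id=RandomUUID(), created=TransactionNow())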
|
Jgarcia-IAS/localizacion
|
refs/heads/master
|
openerp/addons-extra/odoo-pruebas/odoo-server/addons/website_sale_options/models/product.py
|
395
|
# -*- coding: utf-8 -*-
from openerp import tools
from openerp.osv import osv, fields
class product_template(osv.Model):
_inherit = "product.template"
_columns = {
'optional_product_ids': fields.many2many('product.template','product_optional_rel','src_id','dest_id',string='Optional Products', help="Products to propose when add to cart."),
}
|
firebase/grpc
|
refs/heads/master
|
bazel/test/python_test_repo/helloworld.py
|
13
|
# Copyright 2019 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
import contextlib
import datetime
import logging
import unittest
import grpc
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from concurrent import futures
import helloworld_pb2
import helloworld_pb2_grpc
_HOST = 'localhost'
_SERVER_ADDRESS = '{}:0'.format(_HOST)
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, context):
request_in_flight = datetime.datetime.now() - \
request.request_initiation.ToDatetime()
request_duration = duration_pb2.Duration()
request_duration.FromTimedelta(request_in_flight)
return helloworld_pb2.HelloReply(
message='Hello, %s!' % request.name,
request_duration=request_duration,
)
@contextlib.contextmanager
def _listening_server():
server = grpc.server(futures.ThreadPoolExecutor())
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
port = server.add_insecure_port(_SERVER_ADDRESS)
server.start()
try:
yield port
finally:
server.stop(0)
class ImportTest(unittest.TestCase):
def test_import(self):
with _listening_server() as port:
with grpc.insecure_channel('{}:{}'.format(_HOST, port)) as channel:
stub = helloworld_pb2_grpc.GreeterStub(channel)
request_timestamp = timestamp_pb2.Timestamp()
request_timestamp.GetCurrentTime()
response = stub.SayHello(helloworld_pb2.HelloRequest(
name='you',
request_initiation=request_timestamp,
),
wait_for_ready=True)
self.assertEqual(response.message, "Hello, you!")
self.assertGreater(response.request_duration.nanos, 0)
if __name__ == '__main__':
logging.basicConfig()
unittest.main()
|
hurricup/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/gis/db/backends/mysql/base.py
|
308
|
from django.db.backends.mysql.base import *
from django.db.backends.mysql.base import DatabaseWrapper as MySQLDatabaseWrapper
from django.contrib.gis.db.backends.mysql.creation import MySQLCreation
from django.contrib.gis.db.backends.mysql.introspection import MySQLIntrospection
from django.contrib.gis.db.backends.mysql.operations import MySQLOperations
class DatabaseWrapper(MySQLDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.creation = MySQLCreation(self)
self.ops = MySQLOperations()
self.introspection = MySQLIntrospection(self)
|
hungtt57/matchmaker
|
refs/heads/master
|
lib/python2.7/site-packages/django/core/files/uploadhandler.py
|
102
|
"""
Base file upload handler classes, and the built-in concrete subclasses
"""
from __future__ import unicode_literals
from io import BytesIO
from django.conf import settings
from django.core.files.uploadedfile import (
InMemoryUploadedFile, TemporaryUploadedFile,
)
from django.utils.encoding import python_2_unicode_compatible
from django.utils.module_loading import import_string
__all__ = [
'UploadFileException', 'StopUpload', 'SkipFile', 'FileUploadHandler',
'TemporaryFileUploadHandler', 'MemoryFileUploadHandler', 'load_handler',
'StopFutureHandlers'
]
class UploadFileException(Exception):
"""
Any error having to do with uploading files.
"""
pass
@python_2_unicode_compatible
class StopUpload(UploadFileException):
"""
This exception is raised when an upload must abort.
"""
def __init__(self, connection_reset=False):
"""
        If ``connection_reset`` is ``True``, Django will halt the upload
        without consuming the rest of it. This will cause the browser to
        show a "connection reset" error.
"""
self.connection_reset = connection_reset
def __str__(self):
if self.connection_reset:
return 'StopUpload: Halt current upload.'
else:
return 'StopUpload: Consume request data, then halt.'
class SkipFile(UploadFileException):
"""
This exception is raised by an upload handler that wants to skip a given file.
"""
pass
class StopFutureHandlers(UploadFileException):
"""
    Upload handlers that have handled a file and do not want future handlers to
run should raise this exception instead of returning None.
"""
pass
class FileUploadHandler(object):
"""
Base class for streaming upload handlers.
"""
chunk_size = 64 * 2 ** 10 # : The default chunk size is 64 KB.
def __init__(self, request=None):
self.file_name = None
self.content_type = None
self.content_length = None
self.charset = None
self.content_type_extra = None
self.request = request
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Handle the raw input from the client.
Parameters:
:input_data:
An object that supports reading via .read().
:META:
``request.META``.
:content_length:
The (integer) value of the Content-Length header from the
client.
:boundary: The boundary from the Content-Type header. Be sure to
prepend two '--'.
"""
pass
def new_file(self, field_name, file_name, content_type, content_length, charset=None, content_type_extra=None):
"""
Signal that a new file has been started.
Warning: As with any data from the client, you should not trust
content_length (and sometimes won't even get it).
"""
self.field_name = field_name
self.file_name = file_name
self.content_type = content_type
self.content_length = content_length
self.charset = charset
self.content_type_extra = content_type_extra
def receive_data_chunk(self, raw_data, start):
"""
Receive data from the streamed upload parser. ``start`` is the position
in the file of the chunk.
"""
raise NotImplementedError('subclasses of FileUploadHandler must provide a receive_data_chunk() method')
def file_complete(self, file_size):
"""
Signal that a file has completed. File size corresponds to the actual
size accumulated by all the chunks.
Subclasses should return a valid ``UploadedFile`` object.
"""
raise NotImplementedError('subclasses of FileUploadHandler must provide a file_complete() method')
def upload_complete(self):
"""
Signal that the upload is complete. Subclasses should perform cleanup
that is necessary for this handler.
"""
pass
class TemporaryFileUploadHandler(FileUploadHandler):
"""
Upload handler that streams data into a temporary file.
"""
def __init__(self, *args, **kwargs):
super(TemporaryFileUploadHandler, self).__init__(*args, **kwargs)
def new_file(self, file_name, *args, **kwargs):
"""
Create the file object to append to as data is coming in.
"""
super(TemporaryFileUploadHandler, self).new_file(file_name, *args, **kwargs)
self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra)
def receive_data_chunk(self, raw_data, start):
self.file.write(raw_data)
def file_complete(self, file_size):
self.file.seek(0)
self.file.size = file_size
return self.file
class MemoryFileUploadHandler(FileUploadHandler):
"""
File upload handler to stream uploads into memory (used for small files).
"""
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Use the content_length to signal whether or not this handler should be in use.
"""
        # Check the content-length header to decide whether this handler
        # should activate. If the post is too large, we cannot use the
        # memory handler.
if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
self.activated = False
else:
self.activated = True
def new_file(self, *args, **kwargs):
super(MemoryFileUploadHandler, self).new_file(*args, **kwargs)
if self.activated:
self.file = BytesIO()
raise StopFutureHandlers()
def receive_data_chunk(self, raw_data, start):
"""
Add the data to the BytesIO file.
"""
if self.activated:
self.file.write(raw_data)
else:
return raw_data
def file_complete(self, file_size):
"""
Return a file object if we're activated.
"""
if not self.activated:
return
self.file.seek(0)
return InMemoryUploadedFile(
file=self.file,
field_name=self.field_name,
name=self.file_name,
content_type=self.content_type,
size=file_size,
charset=self.charset,
content_type_extra=self.content_type_extra
)
def load_handler(path, *args, **kwargs):
"""
Given a path to a handler, return an instance of that handler.
E.g.::
>>> from django.http import HttpRequest
>>> request = HttpRequest()
>>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request)
<TemporaryFileUploadHandler object at 0x...>
"""
return import_string(path)(*args, **kwargs)
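# --- Illustrative sketch (not part of the module above): a custom handler
# built on the FileUploadHandler API documented here. It aborts any upload
# larger than a fixed quota by raising StopUpload from receive_data_chunk().
# The class name and quota value are assumptions made for the example.
class QuotaUploadHandler(FileUploadHandler):
    """Abort uploads larger than ``max_bytes`` (sketch only)."""
    max_bytes = 10 * 2 ** 20  # hypothetical 10 MB quota

    def __init__(self, *args, **kwargs):
        super(QuotaUploadHandler, self).__init__(*args, **kwargs)
        self.total = 0

    def receive_data_chunk(self, raw_data, start):
        self.total += len(raw_data)
        if self.total > self.max_bytes:
            raise StopUpload(connection_reset=True)
        return raw_data  # returning the chunk passes it on to later handlers

    def file_complete(self, file_size):
        return None  # defer to another handler to produce the UploadedFile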
|
razzius/PyClassLessons
|
refs/heads/master
|
instructors/need-rework/15_classy_OOP/cat_class.py
|
3
|
import random
class Cat(object):
    enemies = 5
    def __init__(self, name=None, top_speed=9223372036854775808L, cat=None, hunger=None, food=None):
        self.name = name
        self.top_speed = top_speed
        # Honour the keyword arguments instead of silently ignoring them.
        self.hunger = hunger if hunger is not None else random.randint(1, 10)
        self.food = food if food is not None else []
        self.at_vet = False
def change_name(self, name):
self.name = name
def __str__(self):
return "this is a cat named {0} and has attributes {1}" \
.format(self.name, self.__dict__)
def chase_bird(self):
self.hunger += 1
if self.hunger >= 10:
self.at_vet=True
return "cat at vet: {}".format(self.at_vet)
def catch_bird(self):
if random.randint(1,2) == 1:
self.food.append(2)
else:
print("the bird flew away")
    def eat(self):
        if self.food:  # guard against popping from an empty food list
            self.hunger -= self.food.pop()
cat = Cat()
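# Example usage (illustrative): exercise the class interactively.
if __name__ == '__main__':
    felix = Cat(name='Felix')
    print(felix)
    felix.chase_bird()
    felix.catch_bird()
    if felix.food:
        felix.eat()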
|
liulion/mayavi
|
refs/heads/master
|
examples/mayavi/mlab/magnetic_field_lines.py
|
10
|
"""
This example uses the streamline module to display field lines of a
magnetic dipole (a current loop).
This example requires scipy.
The magnetic field from an arbitrary current loop is calculated from
eqns (1) and (2) in Phys Rev A Vol. 35, N 4, pp. 1535-1546; 1987.
To get a prettier result, we use a fairly large grid to sample the
field. As a consequence, we need to clear temporary arrays as soon as
possible.
For a more thorough example of magnetic field calculation and
visualization with Mayavi and scipy, see
:ref:`example_magnetic_field`.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
import numpy as np
from scipy import special
#### Calculate the field ####################################################
radius = 1 # Radius of the coils
x, y, z = [e.astype(np.float32) for e in
np.ogrid[-10:10:150j, -10:10:150j, -10:10:150j]]
# express the coordinates in polar form
rho = np.sqrt(x ** 2 + y ** 2)
x_proj = x / rho
y_proj = y / rho
# Free memory early
del x, y
E = special.ellipe((4 * radius * rho) / ((radius + rho) ** 2 + z ** 2))
K = special.ellipk((4 * radius * rho) / ((radius + rho) ** 2 + z ** 2))
Bz = 1 / np.sqrt((radius + rho) ** 2 + z ** 2) * (
K
+ E * (radius ** 2 - rho ** 2 - z ** 2) /
((radius - rho) ** 2 + z ** 2)
)
Brho = z / (rho * np.sqrt((radius + rho) ** 2 + z ** 2)) * (
- K
+ E * (radius ** 2 + rho ** 2 + z ** 2) /
((radius - rho) ** 2 + z ** 2)
)
del E, K, z, rho
# On the axis of the coil we get a division by zero, which yields NaN
# where the field is actually zero:
Brho[np.isnan(Brho)] = 0
Bx, By = x_proj * Brho, y_proj * Brho
del x_proj, y_proj, Brho
#### Visualize the field ####################################################
from mayavi import mlab
fig = mlab.figure(1, size=(400, 400), bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
field = mlab.pipeline.vector_field(Bx, By, Bz)
# Unfortunately, the above call makes a copy of the arrays, so we delete
# this copy to free memory.
del Bx, By, Bz
magnitude = mlab.pipeline.extract_vector_norm(field)
contours = mlab.pipeline.iso_surface(magnitude,
contours=[0.01, 0.8, 3.8, ],
transparent=True,
opacity=0.4,
colormap='YlGnBu',
vmin=0, vmax=2)
field_lines = mlab.pipeline.streamline(magnitude, seedtype='line',
integration_direction='both',
colormap='bone',
vmin=0, vmax=1)
# Tweak a bit the streamline.
field_lines.stream_tracer.maximum_propagation = 100.
field_lines.seed.widget.point1 = [69, 75.5, 75.5]
field_lines.seed.widget.point2 = [82, 75.5, 75.5]
field_lines.seed.widget.resolution = 50
field_lines.seed.widget.enabled = False
mlab.view(42, 73, 104, [79, 75, 76])
mlab.show()
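# Off-screen alternative (illustrative, not exercised here): for batch runs
# the scene can be written to an image file instead of opening a window:
#     mlab.options.offscreen = True   # set before creating the figure
#     mlab.savefig('field_lines.png') # in place of mlab.show()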
|
ncoop/i3pystatus
|
refs/heads/master
|
i3pystatus/core/modules.py
|
7
|
import inspect
import traceback
from i3pystatus.core.settings import SettingsBase
from i3pystatus.core.threading import Manager
from i3pystatus.core.util import (convert_position,
MultiClickHandler)
from i3pystatus.core.command import execute
def is_method_of(method, object):
"""Decide whether ``method`` is contained within the MRO of ``object``."""
if not callable(method) or not hasattr(method, "__name__"):
return False
if inspect.ismethod(method):
return method.__self__ is object
for cls in inspect.getmro(object.__class__):
if cls.__dict__.get(method.__name__, None) is method:
return True
return False
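# Example (illustrative):
#     class A(object):
#         def m(self): pass
#     a = A()
#     is_method_of(a.m, a)   # True: bound method of this instance
#     is_method_of(A.m, a)   # True: found in the class's MRO
#     is_method_of(len, a)   # False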
class Module(SettingsBase):
position = 0
settings = (
('on_leftclick', "Callback called on left click (see :ref:`callbacks`)"),
('on_middleclick', "Callback called on middle click (see :ref:`callbacks`)"),
('on_rightclick', "Callback called on right click (see :ref:`callbacks`)"),
('on_upscroll', "Callback called on scrolling up (see :ref:`callbacks`)"),
('on_downscroll', "Callback called on scrolling down (see :ref:`callbacks`)"),
('on_doubleleftclick', "Callback called on double left click (see :ref:`callbacks`)"),
('on_doublemiddleclick', "Callback called on double middle click (see :ref:`callbacks`)"),
('on_doublerightclick', "Callback called on double right click (see :ref:`callbacks`)"),
('on_doubleupscroll', "Callback called on double scroll up (see :ref:`callbacks`)"),
('on_doubledownscroll', "Callback called on double scroll down (see :ref:`callbacks`)"),
('on_otherclick', "Callback called on other click (see :ref:`callbacks`)"),
('on_doubleotherclick', "Callback called on double other click (see :ref:`callbacks`)"),
('on_change', "Callback called when output is changed (see :ref:`callbacks`)"),
('multi_click_timeout', "Time (in seconds) before a single click is executed."),
('hints', "Additional output blocks for module output (see :ref:`hints`)"),
)
on_leftclick = None
on_middleclick = None
on_rightclick = None
on_upscroll = None
on_downscroll = None
on_doubleleftclick = None
on_doublemiddleclick = None
on_doublerightclick = None
on_doubleupscroll = None
on_doubledownscroll = None
on_otherclick = None
on_change = None
on_doubleotherclick = None
multi_click_timeout = 0.25
hints = {"markup": "none"}
def __init__(self, *args, **kwargs):
self._output = None
super(Module, self).__init__(*args, **kwargs)
self.__multi_click = MultiClickHandler(self.__button_callback_handler,
self.multi_click_timeout)
@property
def output(self):
return self._output
@output.setter
def output(self, value):
self._output = value
if self.on_change:
self.on_change()
def registered(self, status_handler):
"""Called when this module is registered with a status handler"""
self.__status_handler = status_handler
def inject(self, json):
if self.output:
if "name" not in self.output:
self.output["name"] = self.__name__
self.output["instance"] = str(id(self))
if (self.output.get("color", "") or "").lower() == "#ffffff":
del self.output["color"]
if self.hints:
for key, val in self.hints.items():
if key not in self.output:
self.output.update({key: val})
if self.output.get("markup") == "pango":
self.text_to_pango()
json.insert(convert_position(self.position, json), self.output)
def run(self):
pass
def send_output(self):
"""Send a status update with the current module output"""
self.__status_handler.io.async_refresh()
def __log_button_event(self, button, cb, args, action, **kwargs):
msg = "{}: button={}, cb='{}', args={}, kwargs={}, type='{}'".format(
self.__name__, button, cb, args, kwargs, action)
self.logger.debug(msg)
def __button_callback_handler(self, button, cb, **kwargs):
def call_callback(cb, *args, **kwargs):
# Recover the function if wrapped (with get_module for example)
wrapped_cb = getattr(cb, "__wrapped__", None)
if wrapped_cb:
locals()["self"] = self # Add self to the local stack frame
tmp_cb = wrapped_cb
else:
tmp_cb = cb
try:
args_spec = inspect.getargspec(tmp_cb)
except Exception:
args_spec = inspect.ArgSpec([], None, None, None)
# Remove all variables present in kwargs that are not used in the
# callback, except if there is a keyword argument.
if not args_spec.keywords:
kwargs = {k: v for k, v in kwargs.items()
if k in args_spec.args}
cb(*args, **kwargs)
if not cb:
self.__log_button_event(button, None, None,
"No callback attached", **kwargs)
return False
if isinstance(cb, list):
cb, args = (cb[0], cb[1:])
else:
args = []
try:
our_method = is_method_of(cb, self)
if callable(cb) and not our_method:
self.__log_button_event(button, cb, args,
"Python callback", **kwargs)
call_callback(cb, *args, **kwargs)
elif our_method:
self.__log_button_event(button, cb, args,
"Method callback", **kwargs)
call_callback(cb, self, *args, **kwargs)
elif hasattr(self, cb):
                if cb != "run":
# CommandEndpoint already calls run() after every
# callback to instantly update any changed state due
# to the callback's actions.
self.__log_button_event(button, cb, args,
"Member callback", **kwargs)
call_callback(getattr(self, cb), *args, **kwargs)
else:
self.__log_button_event(button, cb, args,
"External command", **kwargs)
if hasattr(self, "data"):
kwargs.update(self.data)
args = [str(arg).format(**kwargs) for arg in args]
cb = cb.format(**kwargs)
execute(cb + " " + " ".join(args), detach=True)
except Exception as e:
self.logger.critical("Exception while processing button "
"callback: {!r}".format(e))
self.logger.critical(traceback.format_exc())
# Notify status handler
try:
self.__status_handler.io.async_refresh()
        except Exception:
            pass
def on_click(self, button, **kwargs):
"""
        Maps a click event to its associated callback.
Currently implemented events are:
============ ================ =========
Event Callback setting Button ID
============ ================ =========
Left click on_leftclick 1
Middle click on_middleclick 2
Right click on_rightclick 3
Scroll up on_upscroll 4
Scroll down on_downscroll 5
Others on_otherclick > 5
============ ================ =========
The action is determined by the nature (type and value) of the callback
setting in the following order:
1. If null callback (``None``), no action is taken.
        2. If it's a `Python function`, call it and pass any additional
           arguments.
        3. If it's the name of a `member method` of the current module
           (string), call it and pass any additional arguments.
        4. If the name does not match any `member method` name, execute an
           external program of that name.
.. seealso:: :ref:`callbacks` for more information about
callback settings and examples.
:param button: The ID of button event received from i3bar.
:param kwargs: Further information received from i3bar like the
            positions of the mouse where the click occurred.
:return: Returns ``True`` if a valid callback action was executed.
``False`` otherwise.
"""
actions = ['leftclick', 'middleclick', 'rightclick',
'upscroll', 'downscroll']
try:
action = actions[button - 1]
except (TypeError, IndexError):
self.__log_button_event(button, None, None, "Other button")
action = "otherclick"
m_click = self.__multi_click
with m_click.lock:
double = m_click.check_double(button)
double_action = 'double%s' % action
if double:
action = double_action
# Get callback function
cb = getattr(self, 'on_%s' % action, None)
double_handler = getattr(self, 'on_%s' % double_action, None)
delay_execution = (not double and double_handler)
if delay_execution:
m_click.set_timer(button, cb, **kwargs)
else:
self.__button_callback_handler(button, cb, **kwargs)
def move(self, position):
self.position = position
return self
    def text_to_pango(self):
        """
        Replaces all ampersands in `full_text` and `short_text` attributes of
        `self.output` with `&amp;`.
        It is called internally when pango markup is used.
        Can be called multiple times (`&amp;` won't change to `&amp;amp;`).
        """
        def replace(s):
            s = s.split("&")
            out = s[0]
            for i in range(len(s) - 1):
                if s[i + 1].startswith("amp;"):
                    # Already escaped; keep as-is so the method is idempotent.
                    out += "&" + s[i + 1]
                else:
                    out += "&amp;" + s[i + 1]
            return out
if "full_text" in self.output.keys():
self.output["full_text"] = replace(self.output["full_text"])
if "short_text" in self.output.keys():
self.output["short_text"] = replace(self.output["short_text"])
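# Illustrative configuration (hypothetical values): a callback setting may be
# a Python callable, a [callable_or_name, arg, ...] list, the name of a member
# method, or an external command string, e.g.:
#     Clock(on_leftclick="scroll_format",              # member method name
#           on_rightclick=["gnome-terminal"],          # external command
#           on_upscroll=lambda: print("scrolled up"))  # Python callable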
class IntervalModule(Module):
settings = (
("interval", "interval in seconds between module updates"),
)
interval = 5 # seconds
managers = {}
def registered(self, status_handler):
super(IntervalModule, self).registered(status_handler)
if self.interval in IntervalModule.managers:
IntervalModule.managers[self.interval].append(self)
else:
am = Manager(self.interval)
am.append(self)
IntervalModule.managers[self.interval] = am
am.start()
def __call__(self):
self.run()
def run(self):
"""Called approximately every self.interval seconds
Do not rely on this being called from the same thread at all times.
If you need to always have the same thread context, subclass AsyncModule."""
|
hxddh/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/qqmusic.py
|
76
|
# coding: utf-8
from __future__ import unicode_literals
import random
import time
import re
from .common import InfoExtractor
from ..utils import (
strip_jsonp,
unescapeHTML,
clean_html,
)
from ..compat import compat_urllib_request
class QQMusicIE(InfoExtractor):
IE_NAME = 'qqmusic'
IE_DESC = 'QQ音乐'
    _VALID_URL = r'http://y\.qq\.com/#type=song&mid=(?P<id>[0-9A-Za-z]+)'
_TESTS = [{
'url': 'http://y.qq.com/#type=song&mid=004295Et37taLD',
'md5': '9ce1c1c8445f561506d2e3cfb0255705',
'info_dict': {
'id': '004295Et37taLD',
'ext': 'mp3',
'title': '可惜没如果',
'upload_date': '20141227',
'creator': '林俊杰',
'description': 'md5:d327722d0361576fde558f1ac68a7065',
'thumbnail': 're:^https?://.*\.jpg$',
}
}, {
'note': 'There is no mp3-320 version of this song.',
'url': 'http://y.qq.com/#type=song&mid=004MsGEo3DdNxV',
'md5': 'fa3926f0c585cda0af8fa4f796482e3e',
'info_dict': {
'id': '004MsGEo3DdNxV',
'ext': 'mp3',
'title': '如果',
'upload_date': '20050626',
'creator': '李季美',
'description': 'md5:46857d5ed62bc4ba84607a805dccf437',
'thumbnail': 're:^https?://.*\.jpg$',
}
}]
_FORMATS = {
'mp3-320': {'prefix': 'M800', 'ext': 'mp3', 'preference': 40, 'abr': 320},
'mp3-128': {'prefix': 'M500', 'ext': 'mp3', 'preference': 30, 'abr': 128},
'm4a': {'prefix': 'C200', 'ext': 'm4a', 'preference': 10}
}
# Reference: m_r_GetRUin() in top_player.js
# http://imgcache.gtimg.cn/music/portal_v3/y/top_player.js
@staticmethod
def m_r_get_ruin():
curMs = int(time.time() * 1000) % 1000
return int(round(random.random() * 2147483647) * curMs % 1E10)
def _real_extract(self, url):
mid = self._match_id(url)
detail_info_page = self._download_webpage(
'http://s.plcloud.music.qq.com/fcgi-bin/fcg_yqq_song_detail_info.fcg?songmid=%s&play=0' % mid,
mid, note='Download song detail info',
errnote='Unable to get song detail info', encoding='gbk')
song_name = self._html_search_regex(
r"songname:\s*'([^']+)'", detail_info_page, 'song name')
publish_time = self._html_search_regex(
r'发行时间:(\d{4}-\d{2}-\d{2})', detail_info_page,
'publish time', default=None)
if publish_time:
publish_time = publish_time.replace('-', '')
singer = self._html_search_regex(
r"singer:\s*'([^']+)", detail_info_page, 'singer', default=None)
lrc_content = self._html_search_regex(
r'<div class="content" id="lrc_content"[^<>]*>([^<>]+)</div>',
detail_info_page, 'LRC lyrics', default=None)
if lrc_content:
lrc_content = lrc_content.replace('\\n', '\n')
thumbnail_url = None
albummid = self._search_regex(
[r'albummid:\'([0-9a-zA-Z]+)\'', r'"albummid":"([0-9a-zA-Z]+)"'],
detail_info_page, 'album mid', default=None)
if albummid:
thumbnail_url = "http://i.gtimg.cn/music/photo/mid_album_500/%s/%s/%s.jpg" \
% (albummid[-2:-1], albummid[-1], albummid)
guid = self.m_r_get_ruin()
vkey = self._download_json(
'http://base.music.qq.com/fcgi-bin/fcg_musicexpress.fcg?json=3&guid=%s' % guid,
mid, note='Retrieve vkey', errnote='Unable to get vkey',
transform_source=strip_jsonp)['key']
formats = []
for format_id, details in self._FORMATS.items():
formats.append({
'url': 'http://cc.stream.qqmusic.qq.com/%s%s.%s?vkey=%s&guid=%s&fromtag=0'
% (details['prefix'], mid, details['ext'], vkey, guid),
'format': format_id,
'format_id': format_id,
'preference': details['preference'],
'abr': details.get('abr'),
})
self._check_formats(formats, mid)
self._sort_formats(formats)
return {
'id': mid,
'formats': formats,
'title': song_name,
'upload_date': publish_time,
'creator': singer,
'description': lrc_content,
'thumbnail': thumbnail_url,
}
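# Usage sketch (illustrative): extractors are normally driven through a
# YoutubeDL instance rather than instantiated directly, e.g.:
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#         info = ydl.extract_info(
#             'http://y.qq.com/#type=song&mid=004295Et37taLD', download=False)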
class QQPlaylistBaseIE(InfoExtractor):
@staticmethod
def qq_static_url(category, mid):
return 'http://y.qq.com/y/static/%s/%s/%s/%s.html' % (category, mid[-2], mid[-1], mid)
@classmethod
def get_entries_from_page(cls, page):
entries = []
for item in re.findall(r'class="data"[^<>]*>([^<>]+)</', page):
song_mid = unescapeHTML(item).split('|')[-5]
entries.append(cls.url_result(
'http://y.qq.com/#type=song&mid=' + song_mid, 'QQMusic',
song_mid))
return entries
class QQMusicSingerIE(QQPlaylistBaseIE):
IE_NAME = 'qqmusic:singer'
IE_DESC = 'QQ音乐 - 歌手'
    _VALID_URL = r'http://y\.qq\.com/#type=singer&mid=(?P<id>[0-9A-Za-z]+)'
_TEST = {
'url': 'http://y.qq.com/#type=singer&mid=001BLpXF2DyJe2',
'info_dict': {
'id': '001BLpXF2DyJe2',
'title': '林俊杰',
'description': 'md5:2a222d89ba4455a3af19940c0481bb78',
},
'playlist_count': 12,
}
def _real_extract(self, url):
mid = self._match_id(url)
singer_page = self._download_webpage(
self.qq_static_url('singer', mid), mid, 'Download singer page')
entries = self.get_entries_from_page(singer_page)
singer_name = self._html_search_regex(
r"singername\s*:\s*'([^']+)'", singer_page, 'singer name',
default=None)
singer_id = self._html_search_regex(
r"singerid\s*:\s*'([0-9]+)'", singer_page, 'singer id',
default=None)
singer_desc = None
if singer_id:
req = compat_urllib_request.Request(
'http://s.plcloud.music.qq.com/fcgi-bin/fcg_get_singer_desc.fcg?utf8=1&outCharset=utf-8&format=xml&singerid=%s' % singer_id)
req.add_header(
'Referer', 'http://s.plcloud.music.qq.com/xhr_proxy_utf8.html')
singer_desc_page = self._download_xml(
                req, mid, 'Download singer description XML')
singer_desc = singer_desc_page.find('./data/info/desc').text
return self.playlist_result(entries, mid, singer_name, singer_desc)
class QQMusicAlbumIE(QQPlaylistBaseIE):
IE_NAME = 'qqmusic:album'
IE_DESC = 'QQ音乐 - 专辑'
    _VALID_URL = r'http://y\.qq\.com/#type=album&mid=(?P<id>[0-9A-Za-z]+)'
_TESTS = [{
'url': 'http://y.qq.com/#type=album&mid=000gXCTb2AhRR1',
'info_dict': {
'id': '000gXCTb2AhRR1',
'title': '我们都是这样长大的',
'description': 'md5:179c5dce203a5931970d306aa9607ea6',
},
'playlist_count': 4,
}, {
'url': 'http://y.qq.com/#type=album&mid=002Y5a3b3AlCu3',
'info_dict': {
'id': '002Y5a3b3AlCu3',
'title': '그리고...',
'description': 'md5:a48823755615508a95080e81b51ba729',
},
'playlist_count': 8,
}]
def _real_extract(self, url):
mid = self._match_id(url)
album = self._download_json(
'http://i.y.qq.com/v8/fcg-bin/fcg_v8_album_info_cp.fcg?albummid=%s&format=json' % mid,
mid, 'Download album page')['data']
entries = [
self.url_result(
'http://y.qq.com/#type=song&mid=' + song['songmid'], 'QQMusic', song['songmid']
) for song in album['list']
]
album_name = album.get('name')
album_detail = album.get('desc')
if album_detail is not None:
album_detail = album_detail.strip()
return self.playlist_result(entries, mid, album_name, album_detail)
class QQMusicToplistIE(QQPlaylistBaseIE):
IE_NAME = 'qqmusic:toplist'
IE_DESC = 'QQ音乐 - 排行榜'
_VALID_URL = r'http://y\.qq\.com/#type=toplist&p=(?P<id>(top|global)_[0-9]+)'
_TESTS = [{
'url': 'http://y.qq.com/#type=toplist&p=global_123',
'info_dict': {
'id': 'global_123',
'title': '美国iTunes榜',
},
'playlist_count': 10,
}, {
'url': 'http://y.qq.com/#type=toplist&p=top_3',
'info_dict': {
'id': 'top_3',
'title': 'QQ音乐巅峰榜·欧美',
'description': 'QQ音乐巅峰榜·欧美根据用户收听行为自动生成,集结当下最流行的欧美新歌!:更新时间:每周四22点|统'
'计周期:一周(上周四至本周三)|统计对象:三个月内发行的欧美歌曲|统计数量:100首|统计算法:根据'
'歌曲在一周内的有效播放次数,由高到低取前100名(同一歌手最多允许5首歌曲同时上榜)|有效播放次数:'
'登录用户完整播放一首歌曲,记为一次有效播放;同一用户收听同一首歌曲,每天记录为1次有效播放'
},
'playlist_count': 100,
}, {
'url': 'http://y.qq.com/#type=toplist&p=global_106',
'info_dict': {
'id': 'global_106',
'title': '韩国Mnet榜',
},
'playlist_count': 50,
}]
def _real_extract(self, url):
list_id = self._match_id(url)
list_type, num_id = list_id.split("_")
toplist_json = self._download_json(
'http://i.y.qq.com/v8/fcg-bin/fcg_v8_toplist_cp.fcg?type=%s&topid=%s&format=json'
% (list_type, num_id),
list_id, 'Download toplist page')
entries = [
self.url_result(
'http://y.qq.com/#type=song&mid=' + song['data']['songmid'], 'QQMusic', song['data']['songmid']
) for song in toplist_json['songlist']
]
topinfo = toplist_json.get('topinfo', {})
list_name = topinfo.get('ListName')
list_description = topinfo.get('info')
return self.playlist_result(entries, list_id, list_name, list_description)
class QQMusicPlaylistIE(QQPlaylistBaseIE):
IE_NAME = 'qqmusic:playlist'
IE_DESC = 'QQ音乐 - 歌单'
_VALID_URL = r'http://y\.qq\.com/#type=taoge&id=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://y.qq.com/#type=taoge&id=3462654915',
'info_dict': {
'id': '3462654915',
'title': '韩国5月新歌精选下旬',
'description': 'md5:d2c9d758a96b9888cf4fe82f603121d4',
},
'playlist_count': 40,
}
def _real_extract(self, url):
list_id = self._match_id(url)
list_json = self._download_json(
'http://i.y.qq.com/qzone-music/fcg-bin/fcg_ucc_getcdinfo_byids_cp.fcg?type=1&json=1&utf8=1&onlysong=0&disstid=%s'
% list_id, list_id, 'Download list page',
transform_source=strip_jsonp)['cdlist'][0]
entries = [
self.url_result(
'http://y.qq.com/#type=song&mid=' + song['songmid'], 'QQMusic', song['songmid']
) for song in list_json['songlist']
]
list_name = list_json.get('dissname')
list_description = clean_html(unescapeHTML(list_json.get('desc')))
return self.playlist_result(entries, list_id, list_name, list_description)
|
imsparsh/python-social-auth
|
refs/heads/master
|
social/tests/backends/test_utils.py
|
11
|
import unittest
from sure import expect
from social.tests.models import TestStorage
from social.tests.strategy import TestStrategy
from social.backends.utils import load_backends, get_backend
from social.backends.github import GithubOAuth2
class BaseBackendUtilsTest(unittest.TestCase):
def setUp(self):
self.strategy = TestStrategy(storage=TestStorage)
def tearDown(self):
self.strategy = None
class LoadBackendsTest(BaseBackendUtilsTest):
def test_load_backends(self):
loaded_backends = load_backends((
'social.backends.github.GithubOAuth2',
'social.backends.facebook.FacebookOAuth2',
'social.backends.flickr.FlickrOAuth'
), force_load=True)
keys = list(loaded_backends.keys())
keys.sort()
expect(keys).to.equal(['facebook', 'flickr', 'github'])
backends = ()
loaded_backends = load_backends(backends, force_load=True)
expect(len(list(loaded_backends.keys()))).to.equal(0)
class GetBackendTest(BaseBackendUtilsTest):
def test_get_backend(self):
backend = get_backend((
'social.backends.github.GithubOAuth2',
'social.backends.facebook.FacebookOAuth2',
'social.backends.flickr.FlickrOAuth'
), 'github')
expect(backend).to.equal(GithubOAuth2)
def test_get_missing_backend(self):
backend = get_backend((
'social.backends.github.GithubOAuth2',
'social.backends.facebook.FacebookOAuth2',
'social.backends.flickr.FlickrOAuth'
), 'foobar')
expect(backend).to.equal(None)
|
areteix/powerline
|
refs/heads/develop
|
tests/setup_statusline_catcher.py
|
28
|
# vim:fileencoding=utf-8:noet
import json
import vim
from powerline.lib.unicode import u
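# The name 'powerline' is injected into this module's namespace by the vim
# test harness before this file is executed, hence the NOQA markers below.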
_powerline_old_render = powerline.render # NOQA
def _powerline_test_render_function(*args, **kwargs):
ret = _powerline_old_render(*args, **kwargs)
vim.eval('add(g:statusline_values, %s)' % json.dumps(u(ret)))
return ret
powerline.render = _powerline_test_render_function # NOQA
|
webkom/django-auth-abakus
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# django-auth-abakus documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 13 15:34:59 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-auth-abakus'
copyright = u'Abakus Webkom, License MIT'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.0'
# The full version, including alpha/beta/rc tags.
release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-auth-abakusdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django-auth-abakus.tex', u'django-auth-abakus Documentation',
u'Abakus Webkom', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-auth-abakus', u'django-auth-abakus Documentation',
[u'Abakus Webkom'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-auth-abakus', u'django-auth-abakus Documentation',
u'Abakus Webkom', 'django-auth-abakus', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
ktbs/ktbs-bench
|
refs/heads/master
|
bin/bench.py
|
1
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Run benchmarks.
Usage:
bench.py <bench_folder> [<output_folder>]
Options:
-h --help Show this help screen.
"""
from docopt import docopt
from os import listdir, path
from sys import path as sys_path
def scan_bench_files(directory):
"""Scan a directory for existing benchmark scripts.
:param str directory: path to the directory containing the benchmark scripts
:returns tuple:
"""
if directory is None:
directory = '.'
ldir = listdir(directory)
res = filter(lambda f: f.startswith('bench_') and f.endswith('.py'),
ldir)
res = map(lambda f: path.join(path.abspath(directory), f),
res)
return res
if __name__ == '__main__':
args = docopt(__doc__, version='bench 0.1')
arg_bench_folder = args['<bench_folder>']
for bench_file in scan_bench_files(arg_bench_folder):
sys_path.append(path.dirname(bench_file)) # Add script directory to sys.path in case of imports
execfile(bench_file)
|
aospx-kitkat/platform_external_chromium_org
|
refs/heads/kitkat
|
tools/perf_expectations/update_perf_expectations.py
|
161
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepare tests that require re-baselining for input to make_expectations.py.
The regularly running perf-AV tests require re-baselining of expectations
about once a week. The steps involved in re-baselining are:
1.) Identify the tests to update, based off reported e-mail results.
2.) Figure out reva and revb values, which is the starting and ending revision
numbers for the range that we should use to obtain new thresholds.
3.) Modify lines in perf_expectations.json referring to the tests to be updated,
so that they may be used as input to make_expectations.py.
This script automates the last step above.
Here's a sample line from perf_expectations.json:
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 163299, \
"revb": 164141, "type": "absolute", "better": "higher", "improve": 0, \
"regress": 0, "sha1": "54d94538"},
To get the above test ready for input to make_expectations.py, it should become:
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": <new reva>, \
"revb": <new revb>, "type": "absolute", "better": "higher", "improve": 0, \
"regress": 0},
Examples:
1.) To update the test specified above and get baseline
values using the revision range 12345 and 23456, run this script with a command
line like this:
python update_perf_expectations.py -f \
win-release/media_tests_av_perf/fps/tulip2.m4a --reva 12345 --revb 23456
Or, using an input file,
where the input file contains a single line with text
win-release/media_tests_av_perf/fps/tulip2.m4a
run with this command line:
python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456
2.) Let's say you want to update all seek tests on windows, and get baseline
values using the revision range 12345 and 23456.
Run this script with this command line:
python update_perf_expectations.py -f win-release/media_tests_av_perf/seek/ \
--reva 12345 --revb 23456
Or:
python update_perf_expectations.py -f win-release/.*/seek/ --reva 12345 \
--revb 23456
Or, using an input file,
where the input file contains a single line with text win-release/.*/seek/:
python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456
3.) Similarly, if you want to update seek tests on all platforms
python update_perf_expectations.py -f .*-release/.*/seek/ --reva 12345 \
--revb 23456
"""
import logging
from optparse import OptionParser
import os
import re
import make_expectations as perf_ex_lib
# Default logging is INFO. Use --verbose to enable DEBUG logging.
_DEFAULT_LOG_LEVEL = logging.INFO
def GetTestsToUpdate(contents, all_test_keys):
"""Parses input contents and obtains tests to be re-baselined.
Args:
contents: string containing contents of input file.
all_test_keys: list of keys of test dictionary.
Returns:
A list of keys for tests that should be updated.
"""
# Each line of the input file specifies a test case to update.
tests_list = []
for test_case_filter in contents.splitlines():
# Skip any empty lines.
if test_case_filter:
# Sample expected line:
# win-release/media_tests_av_perf/seek/\
# CACHED_BUFFERED_SEEK_NoConstraints_crowd1080.ogv
# Or, if reg-ex, then sample line:
# win-release/media-tests_av_perf/seek*
# Skip any leading spaces if they exist in the input file.
logging.debug('Trying to match %s', test_case_filter)
tests_list.extend(GetMatchingTests(test_case_filter.strip(),
all_test_keys))
return tests_list
def GetMatchingTests(tests_to_update, all_test_keys):
"""Parses input reg-ex filter and obtains tests to be re-baselined.
Args:
tests_to_update: reg-ex string specifying tests to be updated.
all_test_keys: list of keys of tests dictionary.
Returns:
A list of keys for tests that should be updated.
"""
tests_list = []
search_string = re.compile(tests_to_update)
# Get matching tests from the dictionary of tests
for test_key in all_test_keys:
if search_string.match(test_key):
tests_list.append(test_key)
logging.debug('%s will be updated', test_key)
logging.info('%s tests found matching reg-ex: %s', len(tests_list),
tests_to_update)
return tests_list
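# Illustrative example (hypothetical test keys):
#   GetMatchingTests('win-release/.*/seek/',
#                    ['win-release/media_tests_av_perf/seek/a.ogv',
#                     'linux-release/media_tests_av_perf/seek/a.ogv'])
#   -> ['win-release/media_tests_av_perf/seek/a.ogv']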
def PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb):
"""Modifies value of tests that are to re-baselined:
Set reva and revb values to specified new values. Remove sha1.
Args:
tests_to_update: list of tests to be updated.
all_tests: dictionary of all tests.
reva: oldest revision in range to use for new values.
revb: newest revision in range to use for new values.
Raises:
ValueError: If reva or revb are not valid ints, or if either
of them are negative.
"""
reva = int(reva)
revb = int(revb)
if reva < 0 or revb < 0:
raise ValueError('Revision values should be positive.')
# Ensure reva is less than revb.
# (this is similar to the check done in make_expectations.py)
if revb < reva:
temp = revb
revb = reva
reva = temp
for test_key in tests_to_update:
# Get original test from the dictionary of tests
test_value = all_tests[test_key]
if test_value:
# Sample line in perf_expectations.json:
# "linux-release/media_tests _av_perf/dropped_frames/crowd360.webm":\
# {"reva": 155180, "revb": 155280, "type": "absolute", \
# "better": "lower", "improve": 0, "regress": 3, "sha1": "276ba29c"},
# Set new revision range
test_value['reva'] = reva
test_value['revb'] = revb
# Remove sha1 to indicate this test requires an update
      # Check first to make sure it exists.
if 'sha1' in test_value:
del test_value['sha1']
else:
logging.warning('%s does not exist.', test_key)
logging.info('Done preparing tests for update.')
def GetCommandLineOptions():
"""Parse command line arguments.
Returns:
An options object containing command line arguments and their values.
"""
parser = OptionParser()
parser.add_option('--reva', dest='reva', type='int',
help='Starting revision of new range.',
metavar='START_REVISION')
parser.add_option('--revb', dest='revb', type='int',
help='Ending revision of new range.',
metavar='END_REVISION')
  parser.add_option('-f', dest='tests_filter',
                    help='Regex to use for filtering tests to be updated. '
                    'Exactly one of -f or -i must be provided.',
                    metavar='FILTER', default='')
  parser.add_option('-i', dest='input_file',
                    help='Optional path to file with reg-exes for tests to'
                    ' update. Exactly one of -f or -i must be provided.',
                    metavar='INPUT_FILE', default='')
parser.add_option('--config', dest='config_file',
default=perf_ex_lib.DEFAULT_CONFIG_FILE,
help='Set the config file to FILE.', metavar='FILE')
parser.add_option('-v', dest='verbose', action='store_true', default=False,
help='Enable verbose output.')
options = parser.parse_args()[0]
return options
def Main():
"""Main driver function."""
options = GetCommandLineOptions()
_SetLogger(options.verbose)
# Do some command-line validation
if not options.input_file and not options.tests_filter:
logging.error('At least one of input-file or test-filter must be provided.')
exit(1)
if options.input_file and options.tests_filter:
logging.error('Specify only one of input file or test-filter.')
exit(1)
if not options.reva or not options.revb:
logging.error('Start and end revision of range must be specified.')
exit(1)
# Load config.
config = perf_ex_lib.ConvertJsonIntoDict(
perf_ex_lib.ReadFile(options.config_file))
# Obtain the perf expectations file from the config file.
perf_file = os.path.join(
os.path.dirname(options.config_file), config['perf_file'])
# We should have all the information we require now.
# On to the real thang.
# First, get all the existing tests from the original perf_expectations file.
all_tests = perf_ex_lib.ConvertJsonIntoDict(
perf_ex_lib.ReadFile(perf_file))
all_test_keys = all_tests.keys()
# Remove the load key, because we don't want to modify it.
all_test_keys.remove('load')
# Keep tests sorted, like in the original file.
all_test_keys.sort()
# Next, get all tests that have been identified for an update.
tests_to_update = []
if options.input_file:
# Tests to update have been specified in an input_file.
# Get contents of file.
tests_filter = perf_ex_lib.ReadFile(options.input_file)
elif options.tests_filter:
# Tests to update have been specified as a reg-ex filter.
tests_filter = options.tests_filter
# Get tests to update based on filter specified.
tests_to_update = GetTestsToUpdate(tests_filter, all_test_keys)
logging.info('Done obtaining matching tests.')
# Now, prepare tests for update.
PrepareTestsForUpdate(tests_to_update, all_tests, options.reva, options.revb)
# Finally, write modified tests back to perf_expectations file.
perf_ex_lib.WriteJson(perf_file, all_tests, all_test_keys,
calculate_sha1=False)
logging.info('Done writing tests for update to %s.', perf_file)
def _SetLogger(verbose):
log_level = _DEFAULT_LOG_LEVEL
if verbose:
log_level = logging.DEBUG
logging.basicConfig(level=log_level, format='%(message)s')
if __name__ == '__main__':
Main()
|
pepsi7959/ProtocolBuffer-c
|
refs/heads/master
|
gtest/test/gtest_test_utils.py
|
408
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except ImportError:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
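# Example (illustrative binary name):
#     gtest_prog = GetTestExecutablePath('gtest_unittest')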
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
        signal               Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate() returns a (stdout, stderr) tuple; the first element
      # holds the child's output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
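# Usage sketch (illustrative):
#     p = Subprocess(['/bin/echo', 'hello'])
#     if p.exited and p.exit_code == 0:
#         print p.output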
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO(vladl@google.com): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
|
VitalPet/c2c-rd-addons
|
refs/heads/8.0
|
c2c_autosearch_check/__init__.py
|
4
|
# -*- coding: utf-8 -*-
##############################################
#
# ChriCar Beteiligungs- und Beratungs- GmbH
# Copyright (C) ChriCar Beteiligungs- und Beratungs- GmbH
# all rights reserved
# created 2009-08-18 23:44:30+02
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
###############################################
import autosearch
|
AXAz0r/apex-sigma
|
refs/heads/master
|
sigma/plugins/minigames/slots/slot_core.py
|
2
|
import random
import asyncio
import arrow
import discord
from config import SlotWinChannelID
slot_back_data = {}
async def spin_slots(cmd, message, bet_amt, symbols, min_spins=4, max_spins=8, spin_cycle_timeout=1):
# Cooldown Check
not_on_cd = True
current_time = arrow.utcnow().timestamp
if message.author.id in slot_back_data:
cd_timestamp = slot_back_data[message.author.id]
if cd_timestamp + 60 > current_time:
not_on_cd = False
else:
not_on_cd = True
# Spinnage
if not_on_cd:
player_points = cmd.db.get_points(message.author)
if player_points['Current'] < bet_amt:
embed = discord.Embed(color=0xDB0000, title='❗ Not Enough Points')
await message.channel.send(None, embed=embed)
return
cmd.db.take_points(message.guild, message.author, bet_amt)
embed_colors = [0x990000, 0x0066FF, 0x009900, 0xff9900, 0xCC33FF, 0x990033]
slot_embed = discord.Embed(color=random.choice(embed_colors))
slot_back_data.update({message.author.id: current_time})
rand_done = 0
res_1 = random.choice(symbols)
res_2 = random.choice(symbols)
res_3 = random.choice(symbols)
res_4 = random.choice(symbols)
res_5 = random.choice(symbols)
res_6 = random.choice(symbols)
res_7 = random.choice(symbols)
res_8 = random.choice(symbols)
res_9 = random.choice(symbols)
slot_view = '⏸ ' + res_4 + ' ' + res_5 + ' ' + res_6 + ' ⏸'
slot_view += '\n▶ ' + res_1 + ' ' + res_2 + ' ' + res_3 + ' ◀'
slot_view += '\n⏸ ' + res_7 + ' ' + res_8 + ' ' + res_9 + ' ⏸'
slot_embed.add_field(name='🎰 Slots are spinning...', value=slot_view)
slot_spinner = await message.channel.send(None, embed=slot_embed)
spin_amt = random.randint(min_spins, max_spins)
while rand_done < spin_amt:
await asyncio.sleep(spin_cycle_timeout)
rand_done += 1
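            # Scroll the reels one row: the visible centre row drops to the
            # bottom, the top row becomes the centre, and a fresh random row
            # enters at the top.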
res_7 = res_1
res_8 = res_2
res_9 = res_3
res_1 = res_4
res_2 = res_5
res_3 = res_6
res_4 = random.choice(symbols)
res_5 = random.choice(symbols)
res_6 = random.choice(symbols)
slot_view = '⏸ ' + res_4 + ' ' + res_5 + ' ' + res_6 + ' ⏸'
slot_view += '\n▶ ' + res_1 + ' ' + res_2 + ' ' + res_3 + ' ◀'
slot_view += '\n⏸ ' + res_7 + ' ' + res_8 + ' ' + res_9 + ' ⏸'
slot_embed.set_field_at(0, name='🎰 Slots are spinning...', value=slot_view)
await slot_spinner.edit(embed=slot_embed)
# Result Response
subtext = ''
if res_1 == res_2 == res_3:
win = True
pts = bet_amt * 210
subtext += 'Your major victory has been recorded on the `#slot-wins` channel of Sigma\'s official server.'
win_notify_channel_object = None
for server in cmd.bot.guilds:
for channel in server.channels:
if channel.id == SlotWinChannelID:
win_notify_channel_object = channel
break
# Guard on the resolved channel rather than the raw ID so the win
# notification is skipped if the configured channel cannot be found.
if win_notify_channel_object:
win_notify_embed = discord.Embed(color=0x0099FF, title='💎 We have a winner!')
win_notify_embed.add_field(name='User', value=message.author.name)
win_notify_embed.add_field(name='Server', value=message.guild.name)
embed_icon = message.author.default_avatar_url
if message.author.avatar_url != '':
embed_icon = message.author.avatar_url
win_notify_embed.set_thumbnail(url=embed_icon)
await win_notify_channel_object.send(None, embed=win_notify_embed)
elif res_1 == res_2 or res_1 == res_3 or res_2 == res_3:
win = True
pts = bet_amt * 12
else:
win = False
pts = 0
if win:
cmd.db.add_points(message.guild, message.author, pts)
slot_embed.set_field_at(0, name='💎 You Won!', value=slot_view)
# Include the subtext so jackpot wins mention the #slot-wins record.
slot_embed.set_footer(text='You won ' + str(pts) + ' points. ' + subtext)
await slot_spinner.edit(embed=slot_embed)
else:
slot_embed.set_field_at(0, name='💣 You Lost...', value=slot_view)
slot_embed.set_footer(text='You lost the ' + str(bet_amt) + ' points that you bet.')
await slot_spinner.edit(embed=slot_embed)
else:
cd_timestamp = slot_back_data[message.author.id]
current_time = arrow.utcnow().timestamp
timeout_amt = cd_timestamp + 60 - current_time
embed = discord.Embed(color=0xDB0000,
title='❗ You can\'t spin for another ' + str(timeout_amt) + ' seconds!')
await message.channel.send(embed=embed)
return
|
almeidapaulopt/erpnext
|
refs/heads/develop
|
erpnext/hr/doctype/employee_advance/test_employee_advance.py
|
17
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate
from erpnext.hr.doctype.employee_advance.employee_advance import make_bank_entry
from erpnext.hr.doctype.employee_advance.employee_advance import EmployeeAdvanceOverPayment
class TestEmployeeAdvance(unittest.TestCase):
def test_paid_amount_and_status(self):
advance = make_employee_advance()
journal_entry = make_payment_entry(advance)
journal_entry.submit()
advance.reload()
self.assertEqual(advance.paid_amount, 1000)
self.assertEqual(advance.status, "Paid")
# try making over payment
journal_entry1 = make_payment_entry(advance)
self.assertRaises(EmployeeAdvanceOverPayment, journal_entry1.submit)
def make_payment_entry(advance):
journal_entry = frappe.get_doc(make_bank_entry("Employee Advance", advance.name))
journal_entry.cheque_no = "123123"
journal_entry.cheque_date = nowdate()
journal_entry.save()
return journal_entry
def make_employee_advance():
doc = frappe.new_doc("Employee Advance")
doc.employee = "_T-Employee-00001"
doc.company = "_Test Company"
doc.purpose = "For site visit"
doc.advance_amount = 1000
doc.posting_date = nowdate()
doc.advance_account = "_Test Employee Advance - _TC"
doc.insert()
doc.submit()
return doc
|
brutasse/djangopeople
|
refs/heads/master
|
djangopeople/django_openidauth/migrations/0001_initial.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserOpenID',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('openid', models.CharField(unique=True, max_length=255, verbose_name='OpenID')),
('created_at', models.DateTimeField(verbose_name='Creation date')),
('user', models.ForeignKey(
verbose_name='User', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
)),
],
options={
'ordering': ('-created_at',),
},
bases=(models.Model,),
),
]
|
grampajoe/django-tenant-templates
|
refs/heads/master
|
django_tenant_templates/middleware.py
|
1
|
"""
Middleware!
"""
from django_tenant_templates import local
class TenantMiddleware(object):
"""Middleware for enabling tenant-aware template loading."""
slug_property_name = 'tenant_slug'
def process_request(self, request):
local.tenant_slug = getattr(request, self.slug_property_name, None)
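# A minimal usage sketch, not part of the original module: a hypothetical
# earlier middleware is assumed to set request.tenant_slug before this one
# copies it into thread-local storage for the tenant-aware template loaders.
#
# MIDDLEWARE_CLASSES = (
#     'myapp.middleware.ResolveTenantMiddleware',  # hypothetical: sets request.tenant_slug
#     'django_tenant_templates.middleware.TenantMiddleware',
# )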
|
mattclay/ansible
|
refs/heads/devel
|
test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/__init__.py
|
12133432
| |
phenoxim/nova
|
refs/heads/master
|
nova/tests/unit/objects/__init__.py
|
12133432
|