repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
0k/openerp-server
refs/heads/ocb-7.0
openerp/report/render/rml2pdf/trml2pdf.py
2
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import sys import copy import reportlab import re from reportlab.pdfgen import canvas from reportlab import platypus import utils import color import os import logging from lxml import etree import base64 from reportlab.platypus.doctemplate import ActionFlowable from openerp.tools.safe_eval import safe_eval as eval from reportlab.lib.units import inch,cm,mm from openerp.tools.misc import file_open from reportlab.pdfbase import pdfmetrics from reportlab.lib.pagesizes import A4, letter try: from cStringIO import StringIO _hush_pyflakes = [ StringIO ] except ImportError: from StringIO import StringIO _logger = logging.getLogger(__name__) encoding = 'utf-8' def select_fontname(fontname, default_fontname): if fontname not in pdfmetrics.getRegisteredFontNames()\ or fontname not in pdfmetrics.standardFonts: # let reportlab attempt to find it try: pdfmetrics.getFont(fontname) except Exception: _logger.warning('Could not locate font %s, substituting default: %s', fontname, default_fontname) fontname = default_fontname return fontname def _open_image(filename, path=None): """Attempt to open a binary file and return the descriptor """ if os.path.isfile(filename): return open(filename, 'rb') for p in (path or []): if p and os.path.isabs(p): fullpath = os.path.join(p, filename) if os.path.isfile(fullpath): return open(fullpath, 'rb') try: if p: fullpath = os.path.join(p, filename) else: fullpath = filename return file_open(fullpath) except IOError: pass raise IOError("File %s cannot be found in image path" % filename) class NumberedCanvas(canvas.Canvas): def __init__(self, *args, **kwargs): canvas.Canvas.__init__(self, *args, **kwargs) self._saved_page_states = [] def showPage(self): self._saved_page_states.append(dict(self.__dict__)) self._startPage() def save(self): """add page info to each page (page x of y)""" for state in self._saved_page_states: self.__dict__.update(state) self.draw_page_number() canvas.Canvas.showPage(self) canvas.Canvas.save(self) def draw_page_number(self): page_count = len(self._saved_page_states) self.setFont("Helvetica", 8) self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40), " %(this)i / %(total)i" % { 'this': self._pageNumber+1, 'total': page_count, } ) class PageCount(platypus.Flowable): def __init__(self, story_count=0): platypus.Flowable.__init__(self) self.story_count = story_count def draw(self): self.canv.beginForm("pageCount%d" % self.story_count) self.canv.setFont("Helvetica", utils.unit_get(str(8))) self.canv.drawString(0, 0, str(self.canv.getPageNumber())) self.canv.endForm() class PageReset(platypus.Flowable): def draw(self): self.canv._doPageReset = True class _rml_styles(object,): def __init__(self, nodes, localcontext): self.localcontext = localcontext self.styles = {} self.styles_obj = {} self.names = {} self.table_styles = {} self.default_style = reportlab.lib.styles.getSampleStyleSheet() for node in nodes: for style in node.findall('blockTableStyle'): self.table_styles[style.get('id')] = self._table_style_get(style) for style in node.findall('paraStyle'): sname = style.get('name') self.styles[sname] = self._para_style_update(style) if self.default_style.has_key(sname): for key, value in self.styles[sname].items(): setattr(self.default_style[sname], key, value) else: self.styles_obj[sname] = reportlab.lib.styles.ParagraphStyle(sname, self.default_style["Normal"], **self.styles[sname]) for variable in node.findall('initialize'): for name in variable.findall('name'): self.names[ name.get('id')] = name.get('value') def _para_style_update(self, node): data = {} for attr in ['textColor', 'backColor', 'bulletColor', 'borderColor']: if node.get(attr): data[attr] = color.get(node.get(attr)) for attr in ['bulletFontName', 'fontName']: if node.get(attr): fontname= select_fontname(node.get(attr), None) if fontname is not None: data['fontName'] = fontname for attr in ['bulletText']: if node.get(attr): data[attr] = node.get(attr) for attr in ['fontSize', 'leftIndent', 'rightIndent', 'spaceBefore', 'spaceAfter', 'firstLineIndent', 'bulletIndent', 'bulletFontSize', 'leading', 'borderWidth','borderPadding','borderRadius']: if node.get(attr): data[attr] = utils.unit_get(node.get(attr)) if node.get('alignment'): align = { 'right':reportlab.lib.enums.TA_RIGHT, 'center':reportlab.lib.enums.TA_CENTER, 'justify':reportlab.lib.enums.TA_JUSTIFY } data['alignment'] = align.get(node.get('alignment').lower(), reportlab.lib.enums.TA_LEFT) return data def _table_style_get(self, style_node): styles = [] for node in style_node: start = utils.tuple_int_get(node, 'start', (0,0) ) stop = utils.tuple_int_get(node, 'stop', (-1,-1) ) if node.tag=='blockValign': styles.append(('VALIGN', start, stop, str(node.get('value')))) elif node.tag=='blockFont': styles.append(('FONT', start, stop, str(node.get('name')))) elif node.tag=='blockTextColor': styles.append(('TEXTCOLOR', start, stop, color.get(str(node.get('colorName'))))) elif node.tag=='blockLeading': styles.append(('LEADING', start, stop, utils.unit_get(node.get('length')))) elif node.tag=='blockAlignment': styles.append(('ALIGNMENT', start, stop, str(node.get('value')))) elif node.tag=='blockSpan': styles.append(('SPAN', start, stop)) elif node.tag=='blockLeftPadding': styles.append(('LEFTPADDING', start, stop, utils.unit_get(node.get('length')))) elif node.tag=='blockRightPadding': styles.append(('RIGHTPADDING', start, stop, utils.unit_get(node.get('length')))) elif node.tag=='blockTopPadding': styles.append(('TOPPADDING', start, stop, utils.unit_get(node.get('length')))) elif node.tag=='blockBottomPadding': styles.append(('BOTTOMPADDING', start, stop, utils.unit_get(node.get('length')))) elif node.tag=='blockBackground': styles.append(('BACKGROUND', start, stop, color.get(node.get('colorName')))) if node.get('size'): styles.append(('FONTSIZE', start, stop, utils.unit_get(node.get('size')))) elif node.tag=='lineStyle': kind = node.get('kind') kind_list = [ 'GRID', 'BOX', 'OUTLINE', 'INNERGRID', 'LINEBELOW', 'LINEABOVE','LINEBEFORE', 'LINEAFTER' ] assert kind in kind_list thick = 1 if node.get('thickness'): thick = float(node.get('thickness')) styles.append((kind, start, stop, thick, color.get(node.get('colorName')))) return platypus.tables.TableStyle(styles) def para_style_get(self, node): style = False sname = node.get('style') if sname: if sname in self.styles_obj: style = self.styles_obj[sname] else: _logger.debug('Warning: style not found, %s - setting default!', node.get('style')) if not style: style = self.default_style['Normal'] para_update = self._para_style_update(node) if para_update: # update style only is necessary style = copy.deepcopy(style) style.__dict__.update(para_update) return style class _rml_doc(object): def __init__(self, node, localcontext=None, images=None, path='.', title=None): if images is None: images = {} if localcontext is None: localcontext = {} self.localcontext = localcontext self.etree = node self.filename = self.etree.get('filename') self.images = images self.path = path self.title = title def docinit(self, els): from reportlab.lib.fonts import addMapping from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import TTFont for node in els: for font in node.findall('registerFont'): name = font.get('fontName').encode('ascii') fname = font.get('fontFile').encode('ascii') if name not in pdfmetrics._fonts: pdfmetrics.registerFont(TTFont(name, fname)) #by default, we map the fontName to each style (bold, italic, bold and italic), so that #if there isn't any font defined for one of these style (via a font family), the system #will fallback on the normal font. addMapping(name, 0, 0, name) #normal addMapping(name, 0, 1, name) #italic addMapping(name, 1, 0, name) #bold addMapping(name, 1, 1, name) #italic and bold #if registerFontFamily is defined, we register the mapping of the fontName to use for each style. for font_family in node.findall('registerFontFamily'): family_name = font_family.get('normal').encode('ascii') if font_family.get('italic'): addMapping(family_name, 0, 1, font_family.get('italic').encode('ascii')) if font_family.get('bold'): addMapping(family_name, 1, 0, font_family.get('bold').encode('ascii')) if font_family.get('boldItalic'): addMapping(family_name, 1, 1, font_family.get('boldItalic').encode('ascii')) def setTTFontMapping(self,face, fontname, filename, mode='all'): from reportlab.lib.fonts import addMapping from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import TTFont if fontname not in pdfmetrics._fonts: pdfmetrics.registerFont(TTFont(fontname, filename)) if mode == 'all': addMapping(face, 0, 0, fontname) #normal addMapping(face, 0, 1, fontname) #italic addMapping(face, 1, 0, fontname) #bold addMapping(face, 1, 1, fontname) #italic and bold elif (mode== 'normal') or (mode == 'regular'): addMapping(face, 0, 0, fontname) #normal elif mode == 'italic': addMapping(face, 0, 1, fontname) #italic elif mode == 'bold': addMapping(face, 1, 0, fontname) #bold elif mode == 'bolditalic': addMapping(face, 1, 1, fontname) #italic and bold def _textual_image(self, node): rc = '' for n in node: rc +=( etree.tostring(n) or '') + n.tail return base64.decodestring(node.tostring()) def _images(self, el): result = {} for node in el.findall('.//image'): rc =( node.text or '') result[node.get('name')] = base64.decodestring(rc) return result def render(self, out): el = self.etree.findall('.//docinit') if el: self.docinit(el) el = self.etree.findall('.//stylesheet') self.styles = _rml_styles(el,self.localcontext) el = self.etree.findall('.//images') if el: self.images.update( self._images(el[0]) ) el = self.etree.findall('.//template') if len(el): pt_obj = _rml_template(self.localcontext, out, el[0], self, images=self.images, path=self.path, title=self.title) el = utils._child_get(self.etree, self, 'story') pt_obj.render(el) else: self.canvas = canvas.Canvas(out) pd = self.etree.find('pageDrawing')[0] pd_obj = _rml_canvas(self.canvas, self.localcontext, None, self, self.images, path=self.path, title=self.title) pd_obj.render(pd) self.canvas.showPage() self.canvas.save() class _rml_canvas(object): def __init__(self, canvas, localcontext, doc_tmpl=None, doc=None, images=None, path='.', title=None): if images is None: images = {} self.localcontext = localcontext self.canvas = canvas self.styles = doc.styles self.doc_tmpl = doc_tmpl self.doc = doc self.images = images self.path = path self.title = title if self.title: self.canvas.setTitle(self.title) def _textual(self, node, x=0, y=0): text = node.text and node.text.encode('utf-8') or '' rc = utils._process_text(self, text) for n in node: if n.tag == 'seq': from reportlab.lib.sequencer import getSequencer seq = getSequencer() rc += str(seq.next(n.get('id'))) if n.tag == 'pageCount': if x or y: self.canvas.translate(x,y) self.canvas.doForm('pageCount%s' % (self.canvas._storyCount,)) if x or y: self.canvas.translate(-x,-y) if n.tag == 'pageNumber': rc += str(self.canvas.getPageNumber()) rc += utils._process_text(self, n.tail) return rc.replace('\n','') def _drawString(self, node): v = utils.attr_get(node, ['x','y']) text=self._textual(node, **v) text = utils.xml2str(text) self.canvas.drawString(text=text, **v) def _drawCenteredString(self, node): v = utils.attr_get(node, ['x','y']) text=self._textual(node, **v) text = utils.xml2str(text) self.canvas.drawCentredString(text=text, **v) def _drawRightString(self, node): v = utils.attr_get(node, ['x','y']) text=self._textual(node, **v) text = utils.xml2str(text) self.canvas.drawRightString(text=text, **v) def _rect(self, node): if node.get('round'): self.canvas.roundRect(radius=utils.unit_get(node.get('round')), **utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'})) else: self.canvas.rect(**utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'})) def _ellipse(self, node): x1 = utils.unit_get(node.get('x')) x2 = utils.unit_get(node.get('width')) y1 = utils.unit_get(node.get('y')) y2 = utils.unit_get(node.get('height')) self.canvas.ellipse(x1,y1,x2,y2, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'})) def _curves(self, node): line_str = node.text.split() lines = [] while len(line_str)>7: self.canvas.bezier(*[utils.unit_get(l) for l in line_str[0:8]]) line_str = line_str[8:] def _lines(self, node): line_str = node.text.split() lines = [] while len(line_str)>3: lines.append([utils.unit_get(l) for l in line_str[0:4]]) line_str = line_str[4:] self.canvas.lines(lines) def _grid(self, node): xlist = [utils.unit_get(s) for s in node.get('xs').split(',')] ylist = [utils.unit_get(s) for s in node.get('ys').split(',')] self.canvas.grid(xlist, ylist) def _translate(self, node): dx = utils.unit_get(node.get('dx')) or 0 dy = utils.unit_get(node.get('dy')) or 0 self.canvas.translate(dx,dy) def _circle(self, node): self.canvas.circle(x_cen=utils.unit_get(node.get('x')), y_cen=utils.unit_get(node.get('y')), r=utils.unit_get(node.get('radius')), **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'})) def _place(self, node): flows = _rml_flowable(self.doc, self.localcontext, images=self.images, path=self.path, title=self.title, canvas=self.canvas).render(node) infos = utils.attr_get(node, ['x','y','width','height']) infos['y']+=infos['height'] for flow in flows: w,h = flow.wrap(infos['width'], infos['height']) if w<=infos['width'] and h<=infos['height']: infos['y']-=h flow.drawOn(self.canvas,infos['x'],infos['y']) infos['height']-=h else: raise ValueError("Not enough space") def _line_mode(self, node): ljoin = {'round':1, 'mitered':0, 'bevelled':2} lcap = {'default':0, 'round':1, 'square':2} if node.get('width'): self.canvas.setLineWidth(utils.unit_get(node.get('width'))) if node.get('join'): self.canvas.setLineJoin(ljoin[node.get('join')]) if node.get('cap'): self.canvas.setLineCap(lcap[node.get('cap')]) if node.get('miterLimit'): self.canvas.setDash(utils.unit_get(node.get('miterLimit'))) if node.get('dash'): dashes = node.get('dash').split(',') for x in range(len(dashes)): dashes[x]=utils.unit_get(dashes[x]) self.canvas.setDash(node.get('dash').split(',')) def _image(self, node): import urllib import urlparse from reportlab.lib.utils import ImageReader nfile = node.get('file') if not nfile: if node.get('name'): image_data = self.images[node.get('name')] _logger.debug("Image %s used", node.get('name')) s = StringIO(image_data) else: newtext = node.text if self.localcontext: res = utils._regex.findall(newtext) for key in res: newtext = eval(key, {}, self.localcontext) or '' image_data = None if newtext: image_data = base64.decodestring(newtext) if image_data: s = StringIO(image_data) else: _logger.debug("No image data!") return False else: if nfile in self.images: s = StringIO(self.images[nfile]) else: try: up = urlparse.urlparse(str(nfile)) except ValueError: up = False if up and up.scheme: # RFC: do we really want to open external URLs? # Are we safe from cross-site scripting or attacks? _logger.debug("Retrieve image from %s", nfile) u = urllib.urlopen(str(nfile)) s = StringIO(u.read()) else: _logger.debug("Open image file %s ", nfile) s = _open_image(nfile, path=self.path) try: img = ImageReader(s) (sx,sy) = img.getSize() _logger.debug("Image is %dx%d", sx, sy) args = { 'x': 0.0, 'y': 0.0, 'mask': 'auto'} for tag in ('width','height','x','y'): if node.get(tag): args[tag] = utils.unit_get(node.get(tag)) if ('width' in args) and (not 'height' in args): args['height'] = sy * args['width'] / sx elif ('height' in args) and (not 'width' in args): args['width'] = sx * args['height'] / sy elif ('width' in args) and ('height' in args): if (float(args['width'])/args['height'])>(float(sx)>sy): args['width'] = sx * args['height'] / sy else: args['height'] = sy * args['width'] / sx self.canvas.drawImage(img, **args) finally: s.close() # self.canvas._doc.SaveToFile(self.canvas._filename, self.canvas) def _path(self, node): self.path = self.canvas.beginPath() self.path.moveTo(**utils.attr_get(node, ['x','y'])) for n in utils._child_get(node, self): if not n.text : if n.tag=='moveto': vals = utils.text_get(n).split() self.path.moveTo(utils.unit_get(vals[0]), utils.unit_get(vals[1])) elif n.tag=='curvesto': vals = utils.text_get(n).split() while len(vals)>5: pos=[] while len(pos)<6: pos.append(utils.unit_get(vals.pop(0))) self.path.curveTo(*pos) elif n.text: data = n.text.split() # Not sure if I must merge all TEXT_NODE ? while len(data)>1: x = utils.unit_get(data.pop(0)) y = utils.unit_get(data.pop(0)) self.path.lineTo(x,y) if (not node.get('close')) or utils.bool_get(node.get('close')): self.path.close() self.canvas.drawPath(self.path, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'})) def setFont(self, node): fontname = select_fontname(node.get('name'), self.canvas._fontname) return self.canvas.setFont(fontname, utils.unit_get(node.get('size'))) def render(self, node): tags = { 'drawCentredString': self._drawCenteredString, 'drawRightString': self._drawRightString, 'drawString': self._drawString, 'rect': self._rect, 'ellipse': self._ellipse, 'lines': self._lines, 'grid': self._grid, 'curves': self._curves, 'fill': lambda node: self.canvas.setFillColor(color.get(node.get('color'))), 'stroke': lambda node: self.canvas.setStrokeColor(color.get(node.get('color'))), 'setFont': self.setFont , 'place': self._place, 'circle': self._circle, 'lineMode': self._line_mode, 'path': self._path, 'rotate': lambda node: self.canvas.rotate(float(node.get('degrees'))), 'translate': self._translate, 'image': self._image } for n in utils._child_get(node, self): if n.tag in tags: tags[n.tag](n) class _rml_draw(object): def __init__(self, localcontext, node, styles, images=None, path='.', title=None): if images is None: images = {} self.localcontext = localcontext self.node = node self.styles = styles self.canvas = None self.images = images self.path = path self.canvas_title = title def render(self, canvas, doc): canvas.saveState() cnv = _rml_canvas(canvas, self.localcontext, doc, self.styles, images=self.images, path=self.path, title=self.canvas_title) cnv.render(self.node) canvas.restoreState() class _rml_Illustration(platypus.flowables.Flowable): def __init__(self, node, localcontext, styles, self2): self.localcontext = (localcontext or {}).copy() self.node = node self.styles = styles self.width = utils.unit_get(node.get('width')) self.height = utils.unit_get(node.get('height')) self.self2 = self2 def wrap(self, *args): return self.width, self.height def draw(self): drw = _rml_draw(self.localcontext ,self.node,self.styles, images=self.self2.images, path=self.self2.path, title=self.self2.title) drw.render(self.canv, None) # Workaround for issue #15: https://bitbucket.org/rptlab/reportlab/issue/15/infinite-pages-produced-when-splitting original_pto_split = platypus.flowables.PTOContainer.split def split(self, availWidth, availHeight): res = original_pto_split(self, availWidth, availHeight) if len(res) > 2 and len(self._content) > 0: header = self._content[0]._ptoinfo.header trailer = self._content[0]._ptoinfo.trailer if isinstance(res[-2], platypus.flowables.UseUpSpace) and len(header + trailer) == len(res[:-2]): return [] return res platypus.flowables.PTOContainer.split = split class _rml_flowable(object): def __init__(self, doc, localcontext, images=None, path='.', title=None, canvas=None): if images is None: images = {} self.localcontext = localcontext self.doc = doc self.styles = doc.styles self.images = images self.path = path self.title = title self.canvas = canvas def _textual(self, node): rc1 = utils._process_text(self, node.text or '') for n in utils._child_get(node,self): txt_n = copy.deepcopy(n) for key in txt_n.attrib.keys(): if key in ('rml_except', 'rml_loop', 'rml_tag'): del txt_n.attrib[key] if not n.tag == 'bullet': if n.tag == 'pageNumber': txt_n.text = self.canvas and str(self.canvas.getPageNumber()) or '' else: txt_n.text = utils.xml2str(self._textual(n)) txt_n.tail = n.tail and utils.xml2str(utils._process_text(self, n.tail.replace('\n',''))) or '' rc1 += etree.tostring(txt_n) return rc1 def _table(self, node): children = utils._child_get(node,self,'tr') if not children: return None length = 0 colwidths = None rowheights = None data = [] styles = [] posy = 0 for tr in children: paraStyle = None if tr.get('style'): st = copy.deepcopy(self.styles.table_styles[tr.get('style')]) for si in range(len(st._cmds)): s = list(st._cmds[si]) s[1] = (s[1][0],posy) s[2] = (s[2][0],posy) st._cmds[si] = tuple(s) styles.append(st) if tr.get('paraStyle'): paraStyle = self.styles.styles[tr.get('paraStyle')] data2 = [] posx = 0 for td in utils._child_get(tr, self,'td'): if td.get('style'): st = copy.deepcopy(self.styles.table_styles[td.get('style')]) for s in st._cmds: s[1][1] = posy s[2][1] = posy s[1][0] = posx s[2][0] = posx styles.append(st) if td.get('paraStyle'): # TODO: merge styles paraStyle = self.styles.styles[td.get('paraStyle')] posx += 1 flow = [] for n in utils._child_get(td, self): if n.tag == etree.Comment: n.text = '' continue fl = self._flowable(n, extra_style=paraStyle) if isinstance(fl,list): flow += fl else: flow.append( fl ) if not len(flow): flow = self._textual(td) data2.append( flow ) if len(data2)>length: length=len(data2) for ab in data: while len(ab)<length: ab.append('') while len(data2)<length: data2.append('') data.append( data2 ) posy += 1 if node.get('colWidths'): assert length == len(node.get('colWidths').split(',')) colwidths = [utils.unit_get(f.strip()) for f in node.get('colWidths').split(',')] if node.get('rowHeights'): rowheights = [utils.unit_get(f.strip()) for f in node.get('rowHeights').split(',')] if len(rowheights) == 1: rowheights = rowheights[0] table = platypus.LongTable(data = data, colWidths=colwidths, rowHeights=rowheights, **(utils.attr_get(node, ['splitByRow'] ,{'repeatRows':'int','repeatCols':'int'}))) if node.get('style'): table.setStyle(self.styles.table_styles[node.get('style')]) for s in styles: table.setStyle(s) return table def _illustration(self, node): return _rml_Illustration(node, self.localcontext, self.styles, self) def _textual_image(self, node): return base64.decodestring(node.text) def _pto(self, node): sub_story = [] pto_header = None pto_trailer = None for node in utils._child_get(node, self): if node.tag == etree.Comment: node.text = '' continue elif node.tag=='pto_header': pto_header = self.render(node) elif node.tag=='pto_trailer': pto_trailer = self.render(node) else: flow = self._flowable(node) if flow: if isinstance(flow,list): sub_story = sub_story + flow else: sub_story.append(flow) return platypus.flowables.PTOContainer(sub_story, trailer=pto_trailer, header=pto_header) def _flowable(self, node, extra_style=None): if node.tag=='pto': return self._pto(node) if node.tag=='para': style = self.styles.para_style_get(node) if extra_style: style.__dict__.update(extra_style) result = [] textuals = self._textual(node).split('\n') keep_empty_lines = (len(textuals) > 1) and len(node.text.strip()) for i in textuals: if keep_empty_lines and len(i.strip()) == 0: i = '<font color="white">&nbsp;</font>' result.append( platypus.Paragraph( i, style, **( utils.attr_get(node, [], {'bulletText':'str'})) ) ) return result elif node.tag=='barCode': try: from reportlab.graphics.barcode import code128 from reportlab.graphics.barcode import code39 from reportlab.graphics.barcode import code93 from reportlab.graphics.barcode import common from reportlab.graphics.barcode import fourstate from reportlab.graphics.barcode import usps from reportlab.graphics.barcode import createBarcodeDrawing except ImportError: _logger.warning("Cannot use barcode renderers:", exc_info=True) return None args = utils.attr_get(node, [], {'ratio':'float','xdim':'unit','height':'unit','checksum':'int','quiet':'int','width':'unit','stop':'bool','bearers':'int','barWidth':'float','barHeight':'float'}) codes = { 'codabar': lambda x: common.Codabar(x, **args), 'code11': lambda x: common.Code11(x, **args), 'code128': lambda x: code128.Code128(str(x), **args), 'standard39': lambda x: code39.Standard39(str(x), **args), 'standard93': lambda x: code93.Standard93(str(x), **args), 'i2of5': lambda x: common.I2of5(x, **args), 'extended39': lambda x: code39.Extended39(str(x), **args), 'extended93': lambda x: code93.Extended93(str(x), **args), 'msi': lambda x: common.MSI(x, **args), 'fim': lambda x: usps.FIM(x, **args), 'postnet': lambda x: usps.POSTNET(x, **args), 'ean13': lambda x: createBarcodeDrawing('EAN13', value=str(x), **args), 'qrcode': lambda x: createBarcodeDrawing('QR', value=x, **args), } code = 'code128' if node.get('code'): code = node.get('code').lower() return codes[code](self._textual(node)) elif node.tag=='name': self.styles.names[ node.get('id')] = node.get('value') return None elif node.tag=='xpre': style = self.styles.para_style_get(node) return platypus.XPreformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int','frags':'int'}))) elif node.tag=='pre': style = self.styles.para_style_get(node) return platypus.Preformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int'}))) elif node.tag=='illustration': return self._illustration(node) elif node.tag=='blockTable': return self._table(node) elif node.tag=='title': styles = reportlab.lib.styles.getSampleStyleSheet() style = styles['Title'] return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'}))) elif re.match('^h([1-9]+[0-9]*)$', (node.tag or '')): styles = reportlab.lib.styles.getSampleStyleSheet() style = styles['Heading'+str(node.tag[1:])] return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'}))) elif node.tag=='image': image_data = False if not node.get('file'): if node.get('name'): if node.get('name') in self.doc.images: _logger.debug("Image %s read ", node.get('name')) image_data = self.doc.images[node.get('name')].read() else: _logger.warning("Image %s not defined", node.get('name')) return False else: import base64 newtext = node.text if self.localcontext: newtext = utils._process_text(self, node.text or '') image_data = base64.decodestring(newtext) if not image_data: _logger.debug("No inline image data") return False image = StringIO(image_data) else: _logger.debug("Image get from file %s", node.get('file')) image = _open_image(node.get('file'), path=self.doc.path) return platypus.Image(image, mask=(250,255,250,255,250,255), **(utils.attr_get(node, ['width','height']))) elif node.tag=='spacer': if node.get('width'): width = utils.unit_get(node.get('width')) else: width = utils.unit_get('1cm') length = utils.unit_get(node.get('length')) return platypus.Spacer(width=width, height=length) elif node.tag=='section': return self.render(node) elif node.tag == 'pageNumberReset': return PageReset() elif node.tag in ('pageBreak', 'nextPage'): return platypus.PageBreak() elif node.tag=='condPageBreak': return platypus.CondPageBreak(**(utils.attr_get(node, ['height']))) elif node.tag=='setNextTemplate': return platypus.NextPageTemplate(str(node.get('name'))) elif node.tag=='nextFrame': return platypus.CondPageBreak(1000) # TODO: change the 1000 ! elif node.tag == 'setNextFrame': from reportlab.platypus.doctemplate import NextFrameFlowable return NextFrameFlowable(str(node.get('name'))) elif node.tag == 'currentFrame': from reportlab.platypus.doctemplate import CurrentFrameFlowable return CurrentFrameFlowable(str(node.get('name'))) elif node.tag == 'frameEnd': return EndFrameFlowable() elif node.tag == 'hr': width_hr=node.get('width') or '100%' color_hr=node.get('color') or 'black' thickness_hr=node.get('thickness') or 1 lineCap_hr=node.get('lineCap') or 'round' return platypus.flowables.HRFlowable(width=width_hr,color=color.get(color_hr),thickness=float(thickness_hr),lineCap=str(lineCap_hr)) else: sys.stderr.write('Warning: flowable not yet implemented: %s !\n' % (node.tag,)) return None def render(self, node_story): def process_story(node_story): sub_story = [] for node in utils._child_get(node_story, self): if node.tag == etree.Comment: node.text = '' continue flow = self._flowable(node) if flow: if isinstance(flow,list): sub_story = sub_story + flow else: sub_story.append(flow) return sub_story return process_story(node_story) class EndFrameFlowable(ActionFlowable): def __init__(self,resume=0): ActionFlowable.__init__(self,('frameEnd',resume)) class TinyDocTemplate(platypus.BaseDocTemplate): def beforeDocument(self): # Store some useful value directly inside canvas, so it's available # on flowable drawing (needed for proper PageCount handling) self.canv._doPageReset = False self.canv._storyCount = 0 def ___handle_pageBegin(self): self.page += 1 self.pageTemplate.beforeDrawPage(self.canv,self) self.pageTemplate.checkPageSize(self.canv,self) self.pageTemplate.onPage(self.canv,self) for f in self.pageTemplate.frames: f._reset() self.beforePage() self._curPageFlowableCount = 0 if hasattr(self,'_nextFrameIndex'): del self._nextFrameIndex for f in self.pageTemplate.frames: if f.id == 'first': self.frame = f break self.handle_frameBegin() def afterPage(self): if self.canv._doPageReset: # Following a <pageReset/> tag: # - we reset page number to 0 # - we add an new PageCount flowable (relative to the current # story number), but not for NumeredCanvas at is handle page # count itself) # NOTE: _rml_template render() method add a PageReset flowable at end # of each story, so we're sure to pass here at least once per story. if not isinstance(self.canv, NumberedCanvas): self.handle_flowable([ PageCount(story_count=self.canv._storyCount) ]) self.canv._pageCount = self.page self.page = 0 self.canv._flag = True self.canv._pageNumber = 0 self.canv._doPageReset = False self.canv._storyCount += 1 class _rml_template(object): def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None): if images is None: images = {} if not localcontext: localcontext={'internal_header':True} self.localcontext = localcontext self.images= images self.path = path self.title = title pagesize_map = {'a4': A4, 'us_letter': letter } pageSize = A4 if self.localcontext.get('company'): pageSize = pagesize_map.get(self.localcontext.get('company').paper_format, A4) if node.get('pageSize'): ps = map(lambda x:x.strip(), node.get('pageSize').replace(')', '').replace('(', '').split(',')) pageSize = ( utils.unit_get(ps[0]),utils.unit_get(ps[1]) ) self.doc_tmpl = TinyDocTemplate(out, pagesize=pageSize, **utils.attr_get(node, ['leftMargin','rightMargin','topMargin','bottomMargin'], {'allowSplitting':'int','showBoundary':'bool','rotation':'int','title':'str','author':'str'})) self.page_templates = [] self.styles = doc.styles self.doc = doc self.image=[] pts = node.findall('pageTemplate') for pt in pts: frames = [] for frame_el in pt.findall('frame'): frame = platypus.Frame( **(utils.attr_get(frame_el, ['x1','y1', 'width','height', 'leftPadding', 'rightPadding', 'bottomPadding', 'topPadding'], {'id':'str', 'showBoundary':'bool'})) ) if utils.attr_get(frame_el, ['last']): frame.lastFrame = True frames.append( frame ) try : gr = pt.findall('pageGraphics')\ or pt[1].findall('pageGraphics') except Exception: # FIXME: be even more specific, perhaps? gr='' if len(gr): # self.image=[ n for n in utils._child_get(gr[0], self) if n.tag=='image' or not self.localcontext] drw = _rml_draw(self.localcontext,gr[0], self.doc, images=images, path=self.path, title=self.title) self.page_templates.append( platypus.PageTemplate(frames=frames, onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) )) else: drw = _rml_draw(self.localcontext,node,self.doc,title=self.title) self.page_templates.append( platypus.PageTemplate(frames=frames,onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) )) self.doc_tmpl.addPageTemplates(self.page_templates) def render(self, node_stories): if self.localcontext and not self.localcontext.get('internal_header',False): del self.localcontext['internal_header'] fis = [] r = _rml_flowable(self.doc,self.localcontext, images=self.images, path=self.path, title=self.title, canvas=None) story_cnt = 0 for node_story in node_stories: if story_cnt > 0: fis.append(platypus.PageBreak()) fis += r.render(node_story) # Reset Page Number with new story tag fis.append(PageReset()) story_cnt += 1 try: if self.localcontext and self.localcontext.get('internal_header',False): self.doc_tmpl.afterFlowable(fis) self.doc_tmpl.build(fis,canvasmaker=NumberedCanvas) else: self.doc_tmpl.build(fis) except platypus.doctemplate.LayoutError, e: e.name = 'Print Error' e.value = 'The document you are trying to print contains a table row that does not fit on one page. Please try to split it in smaller rows or contact your administrator.' raise def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=None): node = etree.XML(rml) r = _rml_doc(node, localcontext, images, path, title=title) #try to override some font mappings try: from customfonts import SetCustomFonts SetCustomFonts(r) except ImportError: # means there is no custom fonts mapping in this system. pass except Exception: _logger.warning('Cannot set font mapping', exc_info=True) pass fp = StringIO() r.render(fp) return fp.getvalue() def parseString(rml, localcontext=None, fout=None, images=None, path='.', title=None): node = etree.XML(rml) r = _rml_doc(node, localcontext, images, path, title=title) #try to override some font mappings try: from customfonts import SetCustomFonts SetCustomFonts(r) except Exception: pass if fout: fp = file(fout,'wb') r.render(fp) fp.close() return fout else: fp = StringIO() r.render(fp) return fp.getvalue() def trml2pdf_help(): print 'Usage: trml2pdf input.rml >output.pdf' print 'Render the standard input (RML) and output a PDF file' sys.exit(0) if __name__=="__main__": if len(sys.argv)>1: if sys.argv[1]=='--help': trml2pdf_help() print parseString(file(sys.argv[1], 'r').read()), else: print 'Usage: trml2pdf input.rml >output.pdf' print 'Try \'trml2pdf --help\' for more information.' # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
erizhang/shadowsocks
refs/heads/rm
shadowsocks/eventloop.py
949
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2013-2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from ssloop # https://github.com/clowwindy/ssloop from __future__ import absolute_import, division, print_function, \ with_statement import os import time import socket import select import errno import logging from collections import defaultdict from shadowsocks import shell __all__ = ['EventLoop', 'POLL_NULL', 'POLL_IN', 'POLL_OUT', 'POLL_ERR', 'POLL_HUP', 'POLL_NVAL', 'EVENT_NAMES'] POLL_NULL = 0x00 POLL_IN = 0x01 POLL_OUT = 0x04 POLL_ERR = 0x08 POLL_HUP = 0x10 POLL_NVAL = 0x20 EVENT_NAMES = { POLL_NULL: 'POLL_NULL', POLL_IN: 'POLL_IN', POLL_OUT: 'POLL_OUT', POLL_ERR: 'POLL_ERR', POLL_HUP: 'POLL_HUP', POLL_NVAL: 'POLL_NVAL', } # we check timeouts every TIMEOUT_PRECISION seconds TIMEOUT_PRECISION = 10 class KqueueLoop(object): MAX_EVENTS = 1024 def __init__(self): self._kqueue = select.kqueue() self._fds = {} def _control(self, fd, mode, flags): events = [] if mode & POLL_IN: events.append(select.kevent(fd, select.KQ_FILTER_READ, flags)) if mode & POLL_OUT: events.append(select.kevent(fd, select.KQ_FILTER_WRITE, flags)) for e in events: self._kqueue.control([e], 0) def poll(self, timeout): if timeout < 0: timeout = None # kqueue behaviour events = self._kqueue.control(None, KqueueLoop.MAX_EVENTS, timeout) results = defaultdict(lambda: POLL_NULL) for e in events: fd = e.ident if e.filter == select.KQ_FILTER_READ: results[fd] |= POLL_IN elif e.filter == select.KQ_FILTER_WRITE: results[fd] |= POLL_OUT return results.items() def register(self, fd, mode): self._fds[fd] = mode self._control(fd, mode, select.KQ_EV_ADD) def unregister(self, fd): self._control(fd, self._fds[fd], select.KQ_EV_DELETE) del self._fds[fd] def modify(self, fd, mode): self.unregister(fd) self.register(fd, mode) def close(self): self._kqueue.close() class SelectLoop(object): def __init__(self): self._r_list = set() self._w_list = set() self._x_list = set() def poll(self, timeout): r, w, x = select.select(self._r_list, self._w_list, self._x_list, timeout) results = defaultdict(lambda: POLL_NULL) for p in [(r, POLL_IN), (w, POLL_OUT), (x, POLL_ERR)]: for fd in p[0]: results[fd] |= p[1] return results.items() def register(self, fd, mode): if mode & POLL_IN: self._r_list.add(fd) if mode & POLL_OUT: self._w_list.add(fd) if mode & POLL_ERR: self._x_list.add(fd) def unregister(self, fd): if fd in self._r_list: self._r_list.remove(fd) if fd in self._w_list: self._w_list.remove(fd) if fd in self._x_list: self._x_list.remove(fd) def modify(self, fd, mode): self.unregister(fd) self.register(fd, mode) def close(self): pass class EventLoop(object): def __init__(self): if hasattr(select, 'epoll'): self._impl = select.epoll() model = 'epoll' elif hasattr(select, 'kqueue'): self._impl = KqueueLoop() model = 'kqueue' elif hasattr(select, 'select'): self._impl = SelectLoop() model = 'select' else: raise Exception('can not find any available functions in select ' 'package') self._fdmap = {} # (f, handler) self._last_time = time.time() self._periodic_callbacks = [] self._stopping = False logging.debug('using event model: %s', model) def poll(self, timeout=None): events = self._impl.poll(timeout) return [(self._fdmap[fd][0], fd, event) for fd, event in events] def add(self, f, mode, handler): fd = f.fileno() self._fdmap[fd] = (f, handler) self._impl.register(fd, mode) def remove(self, f): fd = f.fileno() del self._fdmap[fd] self._impl.unregister(fd) def add_periodic(self, callback): self._periodic_callbacks.append(callback) def remove_periodic(self, callback): self._periodic_callbacks.remove(callback) def modify(self, f, mode): fd = f.fileno() self._impl.modify(fd, mode) def stop(self): self._stopping = True def run(self): events = [] while not self._stopping: asap = False try: events = self.poll(TIMEOUT_PRECISION) except (OSError, IOError) as e: if errno_from_exception(e) in (errno.EPIPE, errno.EINTR): # EPIPE: Happens when the client closes the connection # EINTR: Happens when received a signal # handles them as soon as possible asap = True logging.debug('poll:%s', e) else: logging.error('poll:%s', e) import traceback traceback.print_exc() continue for sock, fd, event in events: handler = self._fdmap.get(fd, None) if handler is not None: handler = handler[1] try: handler.handle_event(sock, fd, event) except (OSError, IOError) as e: shell.print_exception(e) now = time.time() if asap or now - self._last_time >= TIMEOUT_PRECISION: for callback in self._periodic_callbacks: callback() self._last_time = now def __del__(self): self._impl.close() # from tornado def errno_from_exception(e): """Provides the errno from an Exception object. There are cases that the errno attribute was not set so we pull the errno out of the args but if someone instatiates an Exception without any args you will get a tuple error. So this function abstracts all that behavior to give you a safe way to get the errno. """ if hasattr(e, 'errno'): return e.errno elif e.args: return e.args[0] else: return None # from tornado def get_sock_error(sock): error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) return socket.error(error_number, os.strerror(error_number))
rhdedgar/openshift-tools
refs/heads/stg
openshift/installer/vendored/openshift-ansible-3.6.173/roles/lib_openshift/src/ansible/oc_volume.py
64
# pylint: skip-file # flake8: noqa def main(): ''' ansible oc module for volumes ''' module = AnsibleModule( argument_spec=dict( kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), kind=dict(default='dc', choices=['dc', 'rc', 'pods'], type='str'), namespace=dict(default='default', type='str'), vol_name=dict(default=None, type='str'), name=dict(default=None, type='str'), mount_type=dict(default=None, choices=['emptydir', 'hostpath', 'secret', 'pvc', 'configmap'], type='str'), mount_path=dict(default=None, type='str'), # secrets require a name secret_name=dict(default=None, type='str'), # pvc requires a size claim_size=dict(default=None, type='str'), claim_name=dict(default=None, type='str'), # configmap requires a name configmap_name=dict(default=None, type='str'), ), supports_check_mode=True, ) rval = OCVolume.run_ansible(module.params, module.check_mode) if 'failed' in rval: module.fail_json(**rval) module.exit_json(**rval) if __name__ == '__main__': main()
SUSE/kiwi
refs/heads/master
kiwi/oci_tools/buildah.py
1
# Copyright (c) 2019 SUSE Linux GmbH. All rights reserved. # # This file is part of kiwi. # # kiwi is free software: you can redistribute it and/or modify # it under the terms owf the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # kiwi is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with kiwi. If not, see <http://www.gnu.org/licenses/> # import logging import os import random import string # project from kiwi.oci_tools.base import OCIBase from kiwi.command import Command from kiwi.path import Path from kiwi.defaults import Defaults from kiwi.exceptions import KiwiBuildahError log = logging.getLogger('kiwi') class OCIBuildah(OCIBase): """ **Open Container Operations using buildah** """ def post_init(self): """ Initializes some default parameters """ self.working_image = None self.imported_image = None self.working_container = None def import_container_image(self, container_image_ref): """ Imports container image reference to the OCI containers storage. :param str container_image_ref: container image reference """ if not self.imported_image: self.imported_image = 'kiwi-image-{0}:{1}'.format( self._random_string_generator(), Defaults.get_container_base_image_tag() ) else: raise KiwiBuildahError( "Image already imported, called: '{0}'".format( self.imported_image ) ) # We are making use of skopeo instead of only calling 'buildah from' # because we want to control the image name loaded into the containers # storage. This way we are certain to not leave any left over after the # build. Command.run( [ 'skopeo', 'copy', container_image_ref, 'containers-storage:{0}'.format(self.imported_image) ] ) if not self.working_container: self.working_container = 'kiwi-container-{0}'.format( self._random_string_generator() ) else: raise KiwiBuildahError( "Container already initated, called: '{0}'".format( self.working_container ) ) Command.run( [ 'buildah', 'from', '--name', self.working_container, 'containers-storage:{0}'.format(self.imported_image) ] ) def export_container_image( self, filename, transport, image_ref, additional_refs=None ): """ Exports the working container to a container image archive :param str filename: The resulting filename :param str transport: The archive format :param str image_name: Name of the exported image :param str image_tag: Tag of the exported image :param list additional_tags: List of additional references """ extra_tags_opt = [] if additional_refs: for ref in additional_refs: extra_tags_opt.extend(['--additional-tag', ref]) # make sure the target tar file does not exist # skopeo doesn't support force overwrite Path.wipe(filename) if self.working_image: export_image = self.working_image elif self.imported_image: export_image = self.imported_image else: raise KiwiBuildahError("There is no image to export defined") # we are using 'skopeo copy' to export images instead of 'buildah push' # because buildah does not support multiple tags Command.run([ 'skopeo', 'copy', 'containers-storage:{0}'.format(export_image), '{0}:{1}:{2}'.format(transport, filename, image_ref) ] + extra_tags_opt) def init_container(self): """ Initialize a new container in OCI containers storage """ if not self.working_container: self.working_container = 'kiwi-container-{0}'.format( self._random_string_generator() ) else: raise KiwiBuildahError( "Image already imported or initated at '{0}' container".format( self.working_container ) ) Command.run( ['buildah', 'from', '--name', self.working_container, 'scratch'] ) def unpack(self): """ Mounts current container root data to a directory """ cmd = Command.run( ['buildah', 'mount', self.working_container] ) self.oci_root_dir = cmd.output.rstrip() def sync_rootfs(self, root_dir, exclude_list=None): """ Synchronizes the image root with the rootfs of the container :param string root_dir: root directory of the prepare step :param list exclude_list: list of paths to exclude """ self._sync_data( ''.join([root_dir, os.sep]), self.oci_root_dir, exclude_list=exclude_list, options=Defaults.get_sync_options() + ['--delete'] ) def import_rootfs(self, root_dir, exclude_list=None): """ Synchronizes the container rootfs with the root tree of the build :param string root_dir: root directory used in prepare step :param list exclude_list: list of paths to exclude """ self._sync_data( os.sep.join([self.oci_root_dir, '']), root_dir, exclude_list=exclude_list, options=Defaults.get_sync_options() ) def repack(self, oci_config): """ Pack root data directory into container image :param list oci_config: unused parameter """ Command.run( ['buildah', 'umount', self.working_container] ) def set_config(self, oci_config): """ Set list of meta data information such as entry_point, maintainer, etc... to the container. :param list oci_config: meta data list :param bool base_image: True|False """ config_args = self._process_oci_config_to_arguments(oci_config) Command.run( ['buildah', 'config'] + config_args + [self.working_container] ) def post_process(self): """ Commits the OCI container into an OCI image """ if not self.working_image and self.working_container: self.working_image = 'kiwi-image-{0}:{1}'.format( self._random_string_generator(), 'tag-{0}'.format( self._random_string_generator() ) ) else: raise KiwiBuildahError( "No container to commit or container already committed" ) output = Command.run( [ 'buildah', 'commit', '--rm', '--format', 'oci', self.working_container, self.working_image ] ) self.working_image = output.output.rstrip() self.working_container = None @classmethod def _process_oci_config_to_arguments(self, oci_config): """ Process the oci configuration dictionary into a list of arguments for the 'buildah config' command :param list oci_config: meta data list :return: List of buildah config arguments :rtype: list """ arguments = [] if 'maintainer' in oci_config: arguments.append( '--author={0}'.format(oci_config['maintainer']) ) if 'user' in oci_config: arguments.append( '--user={0}'.format(oci_config['user']) ) if 'workingdir' in oci_config: arguments.append( '--workingdir={0}'.format(oci_config['workingdir']) ) if 'entry_command' in oci_config: arguments.append('--entrypoint=[{0}]'.format( ','.join( ['"{0}"'.format(x) for x in oci_config['entry_command']] ) )) if 'entry_subcommand' in oci_config: arguments.append('--cmd={0}'.format( ' '.join(oci_config['entry_subcommand']) )) if 'volumes' in oci_config: for vol in oci_config['volumes']: arguments.append('--volume={0}'.format(vol)) if 'expose_ports' in oci_config: for port in oci_config['expose_ports']: arguments.append('--port={0}'.format(port)) if 'environment' in oci_config: for name in sorted(oci_config['environment']): arguments.append('--env={0}={1}'.format( name, oci_config['environment'][name] )) if 'labels' in oci_config: for name in sorted(oci_config['labels']): arguments.append('--label={0}={1}'.format( name, oci_config['labels'][name] )) if 'history' in oci_config: if 'comment' in oci_config['history']: arguments.append('--history-comment={0}'.format( oci_config['history']['comment'] )) if 'created_by' in oci_config['history']: arguments.append('--created-by={0}'.format( oci_config['history']['created_by'] )) if 'author' in oci_config['history']: log.warning('Author field in history is ignored using buildah') return arguments @classmethod def _random_string_generator( cls, chars_num=6, allchars=string.ascii_lowercase + string.digits ): """ Creates a random string with the given length of characters choosen randomly from a given list of possible characters. Buildah makes use of the hosts configured containers storage of OCI images. This method is used to avoid name collisions with any previous image or container present in the host. :param int chars_num: Lenght of the generated random string :param list allchars: List of possible characters :return: generated random string :rtype: string """ return "".join(random.choice(allchars) for x in range(chars_num)) def __del__(self): if self.working_container: Command.run(['buildah', 'umount', self.working_container]) Command.run(['buildah', 'rm', self.working_container]) if self.working_image: Command.run(['buildah', 'rmi', self.working_image]) if self.imported_image: Command.run(['buildah', 'rmi', self.imported_image])
kashif/scikit-learn
refs/heads/master
sklearn/svm/tests/test_svm.py
29
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import numpy as np import itertools from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.testing import assert_almost_equal from scipy import sparse from nose.tools import assert_raises, assert_true, assert_equal, assert_false from sklearn import svm, linear_model, datasets, metrics, base from sklearn.model_selection import train_test_split from sklearn.datasets import make_classification, make_blobs from sklearn.metrics import f1_score from sklearn.metrics.pairwise import rbf_kernel from sklearn.utils import check_random_state from sklearn.utils.testing import assert_greater, assert_in, assert_less from sklearn.utils.testing import assert_raises_regexp, assert_warns from sklearn.utils.testing import assert_warns_message, assert_raise_message from sklearn.utils.testing import ignore_warnings from sklearn.exceptions import ChangedBehaviorWarning from sklearn.exceptions import ConvergenceWarning from sklearn.exceptions import NotFittedError from sklearn.multiclass import OneVsRestClassifier # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): # Test parameters on classes that make use of libsvm. clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): # Check consistency on dataset iris. # shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_greater(np.mean(pred == iris.target), .95) # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence # we should get deterministic results (assuming that there is no other # thread calling this wrapper calling `srand` concurrently). pred2 = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_array_equal(pred, pred2) @ignore_warnings def test_single_sample_1d(): # Test whether SVCs work on a single sample given as a 1-d array clf = svm.SVC().fit(X, Y) clf.predict(X[0]) clf = svm.LinearSVC(random_state=0).fit(X, Y) clf.predict(X[0]) def test_precomputed(): # SVC with a precomputed kernel. # We test it with a toy dataset and with iris. clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): # Test Support Vector Regression diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.), svm.LinearSVR(C=10.), svm.LinearSVR(C=10.), ): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) # non-regression test; previously, BaseLibSVM would check that # len(np.unique(y)) < 2, which must only be done for SVC svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data))) svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) def test_linearsvr(): # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score1 = lsvr.score(diabetes.data, diabetes.target) svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target) score2 = svr.score(diabetes.data, diabetes.target) assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1 assert np.abs(score1 - score2) < 0.1 def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): # Test OneClassSVM clf = svm.OneClassSVM() clf.fit(X) pred = clf.predict(T) assert_array_almost_equal(pred, [-1, -1, -1]) assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]], decimal=3) assert_raises(ValueError, lambda: clf.coef_) def test_oneclass_decision_function(): # Test OneClassSVM decision function clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_tweak_params(): # Make sure some tweaking of parameters works. # We change clf.dual_coef_ at run time and expect .predict() to change # accordingly. Notice that this is not trivial since it involves a lot # of C/Python copying in the libsvm bindings. # The success of this test ensures that the mapping between libsvm and # the python classifier is complete. clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[-.25, .25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf._dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): # Predict probabilities using SVC # This uses cross validation, so we use a slightly bigger testing set. for clf in (svm.SVC(probability=True, random_state=0, C=1.0), svm.NuSVC(probability=True, random_state=0)): clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal( np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) assert_true(np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8) def test_decision_function(): # Test decision_function # Sanity check, test that decision_function implemented in python # returns the same as the one in libsvm # multi class: clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(iris.data, iris.target) dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int)]) expected = np.array([-1., -0.66, -1., 0.66, 1., 1.]) assert_array_almost_equal(clf.decision_function(X), expected, 2) # kernel binary: clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo') clf.fit(X, Y) rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma) dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_ assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) def test_decision_function_shape(): # check that decision_function_shape='ovr' gives # correct shape and is consistent with predict clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(iris.data, iris.target) dec = clf.decision_function(iris.data) assert_equal(dec.shape, (len(iris.data), 3)) assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1)) # with five classes: X, y = make_blobs(n_samples=80, centers=5, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(X_train, y_train) dec = clf.decision_function(X_test) assert_equal(dec.shape, (len(X_test), 5)) assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1)) # check shape of ovo_decition_function=True clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(X_train, y_train) dec = clf.decision_function(X_train) assert_equal(dec.shape, (len(X_train), 10)) # check deprecation warning clf = svm.SVC(kernel='linear', C=0.1).fit(X_train, y_train) msg = "change the shape of the decision function" dec = assert_warns_message(ChangedBehaviorWarning, msg, clf.decision_function, X_train) assert_equal(dec.shape, (len(X_train), 10)) def test_svr_predict(): # Test SVR's decision_function # Sanity check, test that predict implemented in python # returns the same as the one in libsvm X = iris.data y = iris.target # linear kernel reg = svm.SVR(kernel='linear', C=0.1).fit(X, y) dec = np.dot(X, reg.coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) # rbf kernel reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y) rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma) dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) def test_weight(): # Test class weights clf = svm.SVC(class_weight={1: 0.1}) # we give a small weights to class 1 clf.fit(X, Y) # so all predicted values belong to class 2 assert_array_almost_equal(clf.predict(X), [2] * 6) X_, y_ = make_classification(n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC()): clf.set_params(class_weight={0: .1, 1: 10}) clf.fit(X_[:100], y_[:100]) y_pred = clf.predict(X_[100:]) assert_true(f1_score(y_[100:], y_pred) > .3) def test_sample_weights(): # Test weights on individual samples # TODO: check on NuSVR, OneClass, etc. clf = svm.SVC() clf.fit(X, Y) assert_array_equal(clf.predict([X[2]]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X, Y, sample_weight=sample_weight) assert_array_equal(clf.predict([X[2]]), [2.]) # test that rescaling all samples is the same as changing C clf = svm.SVC() clf.fit(X, Y) dual_coef_no_weight = clf.dual_coef_ clf.set_params(C=100) clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_) def test_auto_weight(): # Test class weights for imbalanced data from sklearn.linear_model import LogisticRegression # We take as dataset the two-dimensional projection of iris so # that it is not separable and remove half of predictors from # class 1. # We add one to the targets as a non-regression test: class_weight="balanced" # used to work only when the labels where a range [0..K). from sklearn.utils import compute_class_weight X, y = iris.data[:, :2], iris.target + 1 unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2]) classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('balanced', classes, y[unbalanced]) assert_true(np.argmax(class_weights) == 2) for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): # check that score is better when class='balanced' is set. y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) clf.set_params(class_weight='balanced') y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X) assert_true(metrics.f1_score(y, y_pred, average='weighted') <= metrics.f1_score(y, y_pred_balanced, average='weighted')) def test_bad_input(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(nu=0.0) assert_raises(ValueError, clf.fit, X, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X, Y2) # Test with arrays that are non-contiguous. for clf in (svm.SVC(), svm.LinearSVC(random_state=0)): Xf = np.asfortranarray(X) assert_false(Xf.flags['C_CONTIGUOUS']) yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T) yf = yf[:, -1] assert_false(yf.flags['F_CONTIGUOUS']) assert_false(yf.flags['C_CONTIGUOUS']) clf.fit(Xf, yf) assert_array_equal(clf.predict(T), true_result) # error for precomputed kernelsx clf = svm.SVC(kernel='precomputed') assert_raises(ValueError, clf.fit, X, Y) # sample_weight bad dimensions clf = svm.SVC() assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1)) # predict with sparse input when trained with dense clf = svm.SVC().fit(X, Y) assert_raises(ValueError, clf.predict, sparse.lil_matrix(X)) Xt = np.array(X).T clf.fit(np.dot(X, Xt), Y) assert_raises(ValueError, clf.predict, X) clf = svm.SVC() clf.fit(X, Y) assert_raises(ValueError, clf.predict, Xt) def test_sparse_precomputed(): clf = svm.SVC(kernel='precomputed') sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]]) try: clf.fit(sparse_gram, [0, 1]) assert not "reached" except TypeError as e: assert_in("Sparse precomputed", str(e)) def test_linearsvc_parameters(): # Test possible parameter combinations in LinearSVC # Generate list of possible parameter combinations losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo'] penalties, duals = ['l1', 'l2', 'bar'], [True, False] X, y = make_classification(n_samples=5, n_features=5) for loss, penalty, dual in itertools.product(losses, penalties, duals): clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual) if ((loss, penalty) == ('hinge', 'l1') or (loss, penalty, dual) == ('hinge', 'l2', False) or (penalty, dual) == ('l1', True) or loss == 'foo' or penalty == 'bar'): assert_raises_regexp(ValueError, "Unsupported set of arguments.*penalty='%s.*" "loss='%s.*dual=%s" % (penalty, loss, dual), clf.fit, X, y) else: clf.fit(X, y) # Incorrect loss value - test if explicit error message is raised assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*", svm.LinearSVC(loss="l3").fit, X, y) # FIXME remove in 1.0 def test_linearsvx_loss_penalty_deprecations(): X, y = [[0.0], [1.0]], [0, 1] msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. Backward compatibility" " for the %s will be removed in %s") # LinearSVC # loss l1 --> hinge assert_warns_message(DeprecationWarning, msg % ("l1", "hinge", "loss='l1'", "1.0"), svm.LinearSVC(loss="l1").fit, X, y) # loss l2 --> squared_hinge assert_warns_message(DeprecationWarning, msg % ("l2", "squared_hinge", "loss='l2'", "1.0"), svm.LinearSVC(loss="l2").fit, X, y) # LinearSVR # loss l1 --> epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l1", "epsilon_insensitive", "loss='l1'", "1.0"), svm.LinearSVR(loss="l1").fit, X, y) # loss l2 --> squared_epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l2", "squared_epsilon_insensitive", "loss='l2'", "1.0"), svm.LinearSVR(loss="l2").fit, X, y) def test_linear_svx_uppercase_loss_penality_raises_error(): # Check if Upper case notation raises error at _fit_liblinear # which is called by fit X, y = [[0.0], [1.0]], [0, 1] assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported", svm.LinearSVC(loss="SQuared_hinge").fit, X, y) assert_raise_message(ValueError, ("The combination of penalty='L2'" " and loss='squared_hinge' is not supported"), svm.LinearSVC(penalty="L2").fit, X, y) def test_linearsvc(): # Test basic routines using LinearSVC clf = svm.LinearSVC(random_state=0).fit(X, Y) # by default should have intercept assert_true(clf.fit_intercept) assert_array_equal(clf.predict(T), true_result) assert_array_almost_equal(clf.intercept_, [0], decimal=3) # the same with l1 penalty clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty with dual formulation clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty, l1 loss clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0) clf.fit(X, Y) assert_array_equal(clf.predict(T), true_result) # test also decision function dec = clf.decision_function(T) res = (dec > 0).astype(np.int) + 1 assert_array_equal(res, true_result) def test_linearsvc_crammer_singer(): # Test LinearSVC with crammer_singer multi-class svm ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0) cs_clf.fit(iris.data, iris.target) # similar prediction for ovr and crammer-singer: assert_true((ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > .9) # classifiers shouldn't be the same assert_true((ovr_clf.coef_ != cs_clf.coef_).all()) # test decision function assert_array_equal(cs_clf.predict(iris.data), np.argmax(cs_clf.decision_function(iris.data), axis=1)) dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_ assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data)) def test_crammer_singer_binary(): # Test Crammer-Singer formulation in the binary case X, y = make_classification(n_classes=2, random_state=0) for fit_intercept in (True, False): acc = svm.LinearSVC(fit_intercept=fit_intercept, multi_class="crammer_singer", random_state=0).fit(X, y).score(X, y) assert_greater(acc, 0.9) def test_linearsvc_iris(): # Test that LinearSVC gives plausible predictions on the iris dataset # Also, test symbolic class names (classes_). target = iris.target_names[iris.target] clf = svm.LinearSVC(random_state=0).fit(iris.data, target) assert_equal(set(clf.classes_), set(iris.target_names)) assert_greater(np.mean(clf.predict(iris.data) == target), 0.8) dec = clf.decision_function(iris.data) pred = iris.target_names[np.argmax(dec, 1)] assert_array_equal(pred, clf.predict(iris.data)) def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): # Test that dense liblinear honours intercept_scaling param X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge', dual=False, C=4, tol=1e-7, random_state=0) assert_true(clf.intercept_scaling == 1, clf.intercept_scaling) assert_true(clf.fit_intercept) # when intercept_scaling is low the intercept value is highly "penalized" # by regularization clf.intercept_scaling = 1 clf.fit(X, y) assert_almost_equal(clf.intercept_, 0, decimal=5) # when intercept_scaling is sufficiently high, the intercept value # is not affected by regularization clf.intercept_scaling = 100 clf.fit(X, y) intercept1 = clf.intercept_ assert_less(intercept1, -1) # when intercept_scaling is sufficiently high, the intercept value # doesn't depend on intercept_scaling value clf.intercept_scaling = 1000 clf.fit(X, y) intercept2 = clf.intercept_ assert_array_almost_equal(intercept1, intercept2, decimal=2) def test_liblinear_set_coef(): # multi-class case clf = svm.LinearSVC().fit(iris.data, iris.target) values = clf.decision_function(iris.data) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(iris.data) assert_array_almost_equal(values, values2) # binary-class case X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = svm.LinearSVC().fit(X, y) values = clf.decision_function(X) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(X) assert_array_equal(values, values2) def test_immutable_coef_property(): # Check that primal coef modification are not silently ignored svms = [ svm.SVC(kernel='linear').fit(iris.data, iris.target), svm.NuSVC(kernel='linear').fit(iris.data, iris.target), svm.SVR(kernel='linear').fit(iris.data, iris.target), svm.NuSVR(kernel='linear').fit(iris.data, iris.target), svm.OneClassSVM(kernel='linear').fit(iris.data), ] for clf in svms: assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3)) assert_raises((RuntimeError, ValueError), clf.coef_.__setitem__, (0, 0), 0) def test_linearsvc_verbose(): # stdout: redirect import os stdout = os.dup(1) # save original stdout os.dup2(os.pipe()[1], 1) # replace it # actual call clf = svm.LinearSVC(verbose=1) clf.fit(X, Y) # stdout: restore os.dup2(stdout, 1) # restore original stdout def test_svc_clone_with_callable_kernel(): # create SVM with callable linear kernel, check that results are the same # as with built-in linear kernel svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, decision_function_shape='ovr') # clone for checking clonability with lambda functions.. svm_cloned = base.clone(svm_callable) svm_cloned.fit(iris.data, iris.target) svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0, decision_function_shape='ovr') svm_builtin.fit(iris.data, iris.target) assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) assert_array_almost_equal(svm_cloned.predict_proba(iris.data), svm_builtin.predict_proba(iris.data), decimal=4) assert_array_almost_equal(svm_cloned.decision_function(iris.data), svm_builtin.decision_function(iris.data)) def test_svc_bad_kernel(): svc = svm.SVC(kernel=lambda x, y: x) assert_raises(ValueError, svc.fit, X, Y) def test_timeout(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, a.fit, X, Y) def test_unfitted(): X = "foo!" # input validation not required when SVM not fitted clf = svm.SVC() assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b", clf.predict, X) clf = svm.NuSVR() assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b", clf.predict, X) # ignore convergence warnings from max_iter=1 @ignore_warnings def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2) def test_linear_svc_convergence_warnings(): # Test that warnings are raised if model does not converge lsvc = svm.LinearSVC(max_iter=2, verbose=1) assert_warns(ConvergenceWarning, lsvc.fit, X, Y) assert_equal(lsvc.n_iter_, 2) def test_svr_coef_sign(): # Test that SVR(kernel="linear") has coef_ with the right sign. # Non-regression test for #2933. X = np.random.RandomState(21).randn(10, 3) y = np.random.RandomState(12).randn(10) for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'), svm.LinearSVR()]: svr.fit(X, y) assert_array_almost_equal(svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_) def test_linear_svc_intercept_scaling(): # Test that the right error message is thrown when intercept_scaling <= 0 for i in [-1, 0]: lsvc = svm.LinearSVC(intercept_scaling=i) msg = ('Intercept scaling is %r but needs to be greater than 0.' ' To disable fitting an intercept,' ' set fit_intercept=False.' % lsvc.intercept_scaling) assert_raise_message(ValueError, msg, lsvc.fit, X, Y) def test_lsvc_intercept_scaling_zero(): # Test that intercept_scaling is ignored when fit_intercept is False lsvc = svm.LinearSVC(fit_intercept=False) lsvc.fit(X, Y) assert_equal(lsvc.intercept_, 0.) def test_hasattr_predict_proba(): # Method must be (un)available before or after fit, switched by # `probability` param G = svm.SVC(probability=True) assert_true(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_true(hasattr(G, 'predict_proba')) G = svm.SVC(probability=False) assert_false(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_false(hasattr(G, 'predict_proba')) # Switching to `probability=True` after fitting should make # predict_proba available, but calling it must not work: G.probability = True assert_true(hasattr(G, 'predict_proba')) msg = "predict_proba is not available when fitted with probability=False" assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data) def test_decision_function_shape_two_class(): for n_classes in [2, 3]: X, y = make_blobs(centers=n_classes, random_state=0) for estimator in [svm.SVC, svm.NuSVC]: clf = OneVsRestClassifier(estimator( decision_function_shape="ovr")).fit(X, y) assert_equal(len(clf.predict(X)), len(y))
sparkslabs/kamaelia_
refs/heads/master
Code/Python/Kamaelia/Kamaelia/SampleTemplateComponent.py
3
#!/usr/bin/env python2.3 # -*- coding: utf-8 -*- # # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- """ Sample Template Component. Use this as the basis for your components! """ from Axon.Component import component, scheduler class CallbackStyleComponent(component): #Inboxes=["inbox","control"] List of inbox names if different #Outboxes=["outbox","signal"] List of outbox names if different #Usescomponents=[] # List of classes used. def __init__(self,label,looptimes,selfstart=0): super(CallbackStyleComponent, self).__init__() # !!!! Must happen, if this method exists self.looptimes = looptimes self.label = label if selfstart: self.activate() def initialiseComponent(self): print ("DEBUG:", self.label, "initialiseComponent") return 1 def mainBody(self): print ("DEBUG: ",self.label, "Now in the main loop") self.looptimes = self.looptimes -1 return self.looptimes def closeDownComponent(self): print ("DEBUG: ",self.label,"closeDownComponent") class StandardStyleComponent(component): #Inboxes=["inbox","control"] List of inbox names if different #Outboxes=["outbox","signal"] List of outbox names if different #Usescomponents=[] # List of classes used. def __init__(self,label,looptimes): super(CallbackStyleComponent, self).__init__() # !!!! Must happen, if this method exists self.looptimes = looptimes self.label = label def main(self): print ("DEBUG:", self.label, "initialiseComponent") yield 1 while 1: print ("DEBUG: ",self.label, "Now in the main loop") self.looptimes = self.looptimes -1 yield self.looptimes print ("DEBUG: ",self.label,"closeDownComponent") __kamaelia_components__ = ( CallbackStyleComponent, StandardStyleComponent ) if __name__ =="__main__": myComponent("A",3,1) myComponent("B",2).activate() scheduler.run.runThreads()
eLBati/odoo
refs/heads/master
openerp/cli/deploy.py
369
#!/usr/bin/env python # -*- coding: utf-8 -*- import argparse import os import requests import sys import tempfile import zipfile from . import Command class Deploy(Command): """Deploy a module on an Odoo instance""" def __init__(self): super(Deploy, self).__init__() self.session = requests.session() def deploy_module(self, module_path, url, login, password, db='', force=False): url = url.rstrip('/') self.authenticate(url, login, password, db) module_file = self.zip_module(module_path) try: return self.upload_module(url, module_file, force=force) finally: os.remove(module_file) def upload_module(self, server, module_file, force=False): print("Uploading module file...") url = server + '/base_import_module/upload' files = dict(mod_file=open(module_file, 'rb')) force = '1' if force else '' res = self.session.post(url, files=files, data=dict(force=force)) if res.status_code != 200: raise Exception("Could not authenticate on server '%s'" % server) return res.text def authenticate(self, server, login, password, db=''): print("Authenticating on server '%s' ..." % server) # Fixate session with a given db if any self.session.get(server + '/web/login', params=dict(db=db)) args = dict(login=login, password=password, db=db) res = self.session.post(server + '/base_import_module/login', args) if res.status_code == 404: raise Exception("The server '%s' does not have the 'base_import_module' installed." % server) elif res.status_code != 200: raise Exception(res.text) def zip_module(self, path): path = os.path.abspath(path) if not os.path.isdir(path): raise Exception("Could not find module directory '%s'" % path) container, module_name = os.path.split(path) temp = tempfile.mktemp(suffix='.zip') try: print("Zipping module directory...") with zipfile.ZipFile(temp, 'w') as zfile: for root, dirs, files in os.walk(path): for file in files: file_path = os.path.join(root, file) zfile.write(file_path, file_path.split(container).pop()) return temp except Exception: os.remove(temp) raise def run(self, cmdargs): parser = argparse.ArgumentParser( prog="%s deploy" % sys.argv[0].split(os.path.sep)[-1], description=self.__doc__ ) parser.add_argument('path', help="Path of the module to deploy") parser.add_argument('url', nargs='?', help='Url of the server (default=http://localhost:8069)', default="http://localhost:8069") parser.add_argument('--db', dest='db', help='Database to use if server does not use db-filter.') parser.add_argument('--login', dest='login', default="admin", help='Login (default=admin)') parser.add_argument('--password', dest='password', default="admin", help='Password (default=admin)') parser.add_argument('--verify-ssl', action='store_true', help='Verify SSL certificate') parser.add_argument('--force', action='store_true', help='Force init even if module is already installed. (will update `noupdate="1"` records)') if not cmdargs: sys.exit(parser.print_help()) args = parser.parse_args(args=cmdargs) if not args.verify_ssl: self.session.verify = False try: if not args.url.startswith(('http://', 'https://')): args.url = 'https://%s' % args.url result = self.deploy_module(args.path, args.url, args.login, args.password, args.db, force=args.force) print(result) except Exception, e: sys.exit("ERROR: %s" % e)
giovaroma/bootstrap4
refs/heads/master
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common_test.py
2542
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for the common.py file.""" import gyp.common import unittest import sys class TestTopologicallySorted(unittest.TestCase): def test_Valid(self): """Test that sorting works on a valid graph with one possible order.""" graph = { 'a': ['b', 'c'], 'b': [], 'c': ['d'], 'd': ['b'], } def GetEdge(node): return tuple(graph[node]) self.assertEqual( gyp.common.TopologicallySorted(graph.keys(), GetEdge), ['a', 'c', 'd', 'b']) def test_Cycle(self): """Test that an exception is thrown on a cyclic graph.""" graph = { 'a': ['b'], 'b': ['c'], 'c': ['d'], 'd': ['a'], } def GetEdge(node): return tuple(graph[node]) self.assertRaises( gyp.common.CycleError, gyp.common.TopologicallySorted, graph.keys(), GetEdge) class TestGetFlavor(unittest.TestCase): """Test that gyp.common.GetFlavor works as intended""" original_platform = '' def setUp(self): self.original_platform = sys.platform def tearDown(self): sys.platform = self.original_platform def assertFlavor(self, expected, argument, param): sys.platform = argument self.assertEqual(expected, gyp.common.GetFlavor(param)) def test_platform_default(self): self.assertFlavor('freebsd', 'freebsd9' , {}) self.assertFlavor('freebsd', 'freebsd10', {}) self.assertFlavor('openbsd', 'openbsd5' , {}) self.assertFlavor('solaris', 'sunos5' , {}); self.assertFlavor('solaris', 'sunos' , {}); self.assertFlavor('linux' , 'linux2' , {}); self.assertFlavor('linux' , 'linux3' , {}); def test_param(self): self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'}) if __name__ == '__main__': unittest.main()
restless/mezzanine-slider-revolution
refs/heads/master
slider_revolution/models.py
1
from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ from mezzanine.conf import settings from mezzanine.core.fields import FileField, MultiChoiceField from mezzanine.core.models import Orderable from mezzanine.pages.models import Page from mezzanine.utils.models import upload_to from slider_revolution.options import SlideOptions, CaptionOptions @python_2_unicode_compatible class Slider(models.Model): """ A slider connected to a page """ slug = models.SlugField(max_length=200, null=True, blank=True) class Meta: verbose_name = _('Slider') verbose_name_plural = _('Sliders') def __str__(self): return self.slug @python_2_unicode_compatible class Slide(Orderable): """ A slide in a slider """ slider = models.ForeignKey(Slider, related_name="slides") image = FileField(verbose_name=_("Image"), upload_to=upload_to('slider_revolution.Slide.image', 'sliders'), format="Image", max_length=255, null=True, blank=True) # slide options transition = MultiChoiceField(max_length=500, choices=SlideOptions.TRANSITION_CHOICES, default=SlideOptions.BOXSLIDE) def __str__(self): return '{} | {}'.format(self.slider.slug, self.image) def image_thumb(self): return '<img src="{}{}" width="100" height="100" />'.format(settings.MEDIA_URL, self.image) image_thumb.allow_tags = True @python_2_unicode_compatible class SlideCaption(models.Model): slide = models.ForeignKey(Slide, related_name="captions") caption = models.CharField(max_length=200, null=True, blank=True) styling_caption = models.CharField(max_length=256, null=True, blank=True, help_text=_('It is the Wrapping main Class which is a MUST. Each Caption need to be defined like this, other way the Slider Plugin can not identifikate the Caption container')) incoming_animation = models.CharField(max_length=256, default=CaptionOptions.FADE, choices=CaptionOptions.INCOMING_ANIMATION_CLASSES_CHOICES, null=True, blank=True, help_text=_('Animation Classes defined the start / end animations on Captions')) outgoing_animation = models.CharField(max_length=256, default=CaptionOptions.FADEOUT, choices=CaptionOptions.OUTGOING_ANIMATION_CLASSES_CHOICES, null=True, blank=True, help_text=_('Animation Classes defined the start / end animations on Captions')) datax = models.CharField(max_length=256, default="300", null=True, blank=True, help_text=_('Possible Values are "left", "center", "right", or any Value between -2500 and 2500. If left/center/right is set, the caption will be siple aligned to the position. Any other "number" will simple set the left position in px of tha caption.')) datay = models.CharField(max_length=256, default="207", null=True, blank=True, help_text=_('Possible Values are "top", "center", "bottom", or any Value between -2500 and 2500. If top/center/bottom is set, the caption will be siple aligned to the position. Any other "number" will simple set the top position in px of tha caption.')) data_hoffset = models.CharField(max_length=256, default="", null=True, blank=True, help_text=_('Only works if data-x set to left/center/right. It will move the Caption with the defined "px" from the aligned position. i.e. data-x="center" data-hoffset="-100" will put the caption 100px left from the slide center horizontaly')) data_voffset = models.CharField(max_length=256, default="", null=True, blank=True, help_text=_('Only works if data-y set to top/center/bottom. It will move the Caption with the defined "px" from the aligned position. i.e. data-x="center" data-hoffset="-100" will put the caption 100px left from the slide center vertically.')) data_speed = models.CharField(max_length=256, default="300", null=True, blank=True, help_text=_('The speed in milliseconds of the transition to move the Caption in the Slide at the defined timepoint.')) data_start = models.CharField(max_length=256, default="800", null=True, blank=True, help_text=_('The timepoint in millisecond when/at the Caption should move in to the slide.')) data_endspeed = models.CharField(max_length=256, default="", null=True, blank=True, help_text=_('The speed in milliseconds of the transition to move the Caption out the Slide at the defined timepoint.')) data_end = models.CharField(max_length=256, default="", null=True, blank=True, help_text=_('The timepoint in millisecond when/at the Caption should move out from the slide.')) def __str__(self): return "{} | {}".format(self.slide, self.caption)
polyaxon/polyaxon
refs/heads/master
sdks/python/http_client/v1/polyaxon_sdk/models/v1_list_searches_response.py
1
#!/usr/bin/python # # Copyright 2018-2021 Polyaxon, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # coding: utf-8 """ Polyaxon SDKs and REST API specification. Polyaxon SDKs and REST API specification. # noqa: E501 The version of the OpenAPI document: 1.10.0 Contact: contact@polyaxon.com Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from polyaxon_sdk.configuration import Configuration class V1ListSearchesResponse(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'count': 'int', 'results': 'list[V1Search]', 'previous': 'str', 'next': 'str' } attribute_map = { 'count': 'count', 'results': 'results', 'previous': 'previous', 'next': 'next' } def __init__(self, count=None, results=None, previous=None, next=None, local_vars_configuration=None): # noqa: E501 """V1ListSearchesResponse - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._count = None self._results = None self._previous = None self._next = None self.discriminator = None if count is not None: self.count = count if results is not None: self.results = results if previous is not None: self.previous = previous if next is not None: self.next = next @property def count(self): """Gets the count of this V1ListSearchesResponse. # noqa: E501 :return: The count of this V1ListSearchesResponse. # noqa: E501 :rtype: int """ return self._count @count.setter def count(self, count): """Sets the count of this V1ListSearchesResponse. :param count: The count of this V1ListSearchesResponse. # noqa: E501 :type: int """ self._count = count @property def results(self): """Gets the results of this V1ListSearchesResponse. # noqa: E501 :return: The results of this V1ListSearchesResponse. # noqa: E501 :rtype: list[V1Search] """ return self._results @results.setter def results(self, results): """Sets the results of this V1ListSearchesResponse. :param results: The results of this V1ListSearchesResponse. # noqa: E501 :type: list[V1Search] """ self._results = results @property def previous(self): """Gets the previous of this V1ListSearchesResponse. # noqa: E501 :return: The previous of this V1ListSearchesResponse. # noqa: E501 :rtype: str """ return self._previous @previous.setter def previous(self, previous): """Sets the previous of this V1ListSearchesResponse. :param previous: The previous of this V1ListSearchesResponse. # noqa: E501 :type: str """ self._previous = previous @property def next(self): """Gets the next of this V1ListSearchesResponse. # noqa: E501 :return: The next of this V1ListSearchesResponse. # noqa: E501 :rtype: str """ return self._next @next.setter def next(self, next): """Sets the next of this V1ListSearchesResponse. :param next: The next of this V1ListSearchesResponse. # noqa: E501 :type: str """ self._next = next def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1ListSearchesResponse): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1ListSearchesResponse): return True return self.to_dict() != other.to_dict()
rahmalik/trafficserver
refs/heads/master
tests/gold_tests/basic/copy_config.test.py
8
''' ''' # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Test.Summary = "Test start up of Traffic server with configuration modification of starting port of different servers at the same time" Test.SkipUnless(Condition.HasProgram("curl", "Curl needs to be installed on your system for this test to work")) # set up some ATS processes ts1 = Test.MakeATSProcess("ts1", select_ports=False) ts1.Setup.ts.CopyConfig('config/records_8090.config', 'records.config') ts2 = Test.MakeATSProcess("ts2", select_ports=False) ts2.Setup.ts.CopyConfig('config/records_8091.config', 'records.config') # setup a testrun t = Test.AddTestRun("Talk to ts1") t.StillRunningAfter = ts1 t.StillRunningAfter += ts2 p = t.Processes.Default p.Command = "curl 127.0.0.1:8090" p.ReturnCode = 0 p.StartBefore(Test.Processes.ts1, ready=When.PortOpen(8090)) p.StartBefore(Test.Processes.ts2, ready=When.PortOpen(8091)) # setup a testrun t = Test.AddTestRun("Talk to ts2") t.StillRunningBefore = ts1 t.StillRunningBefore += ts2 t.StillRunningAfter = ts1 t.StillRunningAfter += ts2 p = t.Processes.Default p.Command = "curl 127.0.0.1:8091" p.ReturnCode = 0
EnTeQuAk/dotfiles
refs/heads/master
sublime-text-3/Packages/HTML-CSS-JS Prettify/src/py/utils/file_utils.py
4
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. """Various utility functions used by this plugin""" from codecs import open as fopen from os import makedirs from os.path import isfile, isdir, dirname, join from uuid import uuid4 from .paths import get_root_dir def get_temp_file_path(): """Gets the path to a constant temporary file""" return join(get_root_dir(), str(uuid4())) def save_text_to_file(text, file_path): """Saves the given text to a file at the specified path""" handle = fopen(file_path, mode="w", encoding="utf-8") handle.write(text) handle.close() def save_text_to_temp_file(text): """Saves the given text to a temporary file""" temp_file_path = get_temp_file_path() save_text_to_file(text, temp_file_path) return temp_file_path def read_text_from_file(file_path, default_contents=None): """Reads the text from a file if it exists; if it doens't, some default contents are returned""" if isfile(file_path): handle = fopen(file_path, mode="r", encoding="utf-8") text = handle.read() handle.close() return text return default_contents def ensure_file(file_path, default_contents=None): """Ensures a file exists; if it doesn't, one is created with some contents""" if not isdir(dirname(file_path)): makedirs(dirname(file_path)) if not isfile(file_path): handle = fopen(file_path, mode="w", encoding="utf-8") handle.write(default_contents) handle.close() return file_path
ioram7/keystone-federado-pgid2013
refs/heads/master
build/requests/build/lib.linux-x86_64-2.7/requests/sessions.py
58
# -*- coding: utf-8 -*- """ requests.session ~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). """ import os from collections import Mapping from datetime import datetime from .compat import cookielib, OrderedDict, urljoin, urlparse from .cookies import cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar from .models import Request, PreparedRequest from .hooks import default_hooks, dispatch_hook from .utils import to_key_val_list, default_headers from .exceptions import TooManyRedirects, InvalidSchema from .structures import CaseInsensitiveDict from .adapters import HTTPAdapter from .utils import requote_uri, get_environ_proxies, get_netrc_auth from .status_codes import codes REDIRECT_STATI = ( codes.moved, # 301 codes.found, # 302 codes.other, # 303 codes.temporary_moved, # 307 ) DEFAULT_REDIRECT_LIMIT = 30 def merge_setting(request_setting, session_setting, dict_class=OrderedDict): """ Determines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` """ if session_setting is None: return request_setting if request_setting is None: return session_setting # Bypass if not a dictionary (e.g. verify) if not ( isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting merged_setting = dict_class(to_key_val_list(session_setting)) merged_setting.update(to_key_val_list(request_setting)) # Remove keys that are set to None. for (k, v) in request_setting.items(): if v is None: del merged_setting[k] return merged_setting class SessionRedirectMixin(object): def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Receives a Response. Returns a generator of Responses.""" i = 0 prepared_request = PreparedRequest() prepared_request.body = req.body prepared_request.headers = req.headers.copy() prepared_request.hooks = req.hooks prepared_request.method = req.method prepared_request.url = req.url # ((resp.status_code is codes.see_other)) while (('location' in resp.headers and resp.status_code in REDIRECT_STATI)): resp.content # Consume socket so it can be released if i >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects) # Release the connection back into the pool. resp.close() url = resp.headers['location'] method = prepared_request.method # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(resp.url) url = '%s:%s' % (parsed_rurl.scheme, url) # Facilitate non-RFC2616-compliant 'location' headers # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. if not urlparse(url).netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = url # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4 if (resp.status_code == codes.see_other and prepared_request.method != 'HEAD'): method = 'GET' # Do what the browsers do, despite standards... if (resp.status_code in (codes.moved, codes.found) and prepared_request.method not in ('GET', 'HEAD')): method = 'GET' prepared_request.method = method # https://github.com/kennethreitz/requests/issues/1084 if resp.status_code not in (codes.temporary, codes.resume): if 'Content-Length' in prepared_request.headers: del prepared_request.headers['Content-Length'] prepared_request.body = None headers = prepared_request.headers try: del headers['Cookie'] except KeyError: pass prepared_request.prepare_cookies(self.cookies) resp = self.send( prepared_request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) i += 1 yield resp class Session(SessionRedirectMixin): """A Requests session. Provides cookie persistience, connection-pooling, and configuration. Basic Usage:: >>> import requests >>> s = requests.Session() >>> s.get('http://httpbin.org/get') 200 """ __attrs__ = [ 'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks', 'params', 'verify', 'cert', 'prefetch', 'adapters', 'stream', 'trust_env', 'max_redirects'] def __init__(self): #: A case-insensitive dictionary of headers to be sent on each #: :class:`Request <Request>` sent from this #: :class:`Session <Session>`. self.headers = default_headers() #: Default Authentication tuple or object to attach to #: :class:`Request <Request>`. self.auth = None #: Dictionary mapping protocol to the URL of the proxy (e.g. #: {'http': 'foo.bar:3128'}) to be used on each #: :class:`Request <Request>`. self.proxies = {} #: Event-handling hooks. self.hooks = default_hooks() #: Dictionary of querystring data to attach to each #: :class:`Request <Request>`. The dictionary values may be lists for #: representing multivalued query parameters. self.params = {} #: Stream response content default. self.stream = False #: SSL Verification default. self.verify = True #: SSL certificate default. self.cert = None #: Maximum number of redirects allowed. If the request exceeds this #: limit, a :class:`TooManyRedirects` exception is raised. self.max_redirects = DEFAULT_REDIRECT_LIMIT #: Should we trust the environment? self.trust_env = True # Set up a CookieJar to be used by default self.cookies = cookiejar_from_dict({}) # Default connection adapters. self.adapters = OrderedDict() self.mount('https://', HTTPAdapter()) self.mount('http://', HTTPAdapter()) def __enter__(self): return self def __exit__(self, *args): self.close() def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None): """Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) Float describing the timeout of the request. :param allow_redirects: (optional) Boolean. Set to True by default. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. """ cookies = cookies or {} proxies = proxies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merged_cookies = RequestsCookieJar() merged_cookies.update(self.cookies) merged_cookies.update(cookies) cookies = merged_cookies # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. env_proxies = get_environ_proxies(url) or {} for (k, v) in env_proxies.items(): proxies.setdefault(k, v) # Set environment's basic authentication. if not auth: auth = get_netrc_auth(url) # Look for configuration. if not verify and verify is not False: verify = os.environ.get('REQUESTS_CA_BUNDLE') # Curl compatibility. if not verify and verify is not False: verify = os.environ.get('CURL_CA_BUNDLE') # Merge all the kwargs. params = merge_setting(params, self.params) headers = merge_setting(headers, self.headers, dict_class=CaseInsensitiveDict) auth = merge_setting(auth, self.auth) proxies = merge_setting(proxies, self.proxies) hooks = merge_setting(hooks, self.hooks) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) # Create the Request. req = Request() req.method = method.upper() req.url = url req.headers = headers req.files = files req.data = data req.params = params req.auth = auth req.cookies = cookies req.hooks = hooks # Prepare the Request. prep = req.prepare() # Send the request. send_kwargs = { 'stream': stream, 'timeout': timeout, 'verify': verify, 'cert': cert, 'proxies': proxies, 'allow_redirects': allow_redirects, } resp = self.send(prep, **send_kwargs) return resp def get(self, url, **kwargs): """Sends a GET request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ kwargs.setdefault('allow_redirects', True) return self.request('GET', url, **kwargs) def options(self, url, **kwargs): """Sends a OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ kwargs.setdefault('allow_redirects', True) return self.request('OPTIONS', url, **kwargs) def head(self, url, **kwargs): """Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ kwargs.setdefault('allow_redirects', False) return self.request('HEAD', url, **kwargs) def post(self, url, data=None, **kwargs): """Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return self.request('POST', url, data=data, **kwargs) def put(self, url, data=None, **kwargs): """Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return self.request('PUT', url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): """Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return self.request('PATCH', url, data=data, **kwargs) def delete(self, url, **kwargs): """Sends a DELETE request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. """ return self.request('DELETE', url, **kwargs) def send(self, request, **kwargs): """Send a given PreparedRequest.""" # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. kwargs.setdefault('stream', self.stream) kwargs.setdefault('verify', self.verify) kwargs.setdefault('cert', self.cert) kwargs.setdefault('proxies', self.proxies) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. if getattr(request, 'prepare', None): raise ValueError('You can only send PreparedRequests.') # Set up variables needed for resolve_redirects and dispatching of # hooks allow_redirects = kwargs.pop('allow_redirects', True) stream = kwargs.get('stream') timeout = kwargs.get('timeout') verify = kwargs.get('verify') cert = kwargs.get('cert') proxies = kwargs.get('proxies') hooks = request.hooks # Get the appropriate adapter to use adapter = self.get_adapter(url=request.url) # Start time (approximately) of the request start = datetime.utcnow() # Send the request r = adapter.send(request, **kwargs) # Total elapsed time of the request (approximately) r.elapsed = datetime.utcnow() - start # Response manipulation hooks r = dispatch_hook('response', hooks, r, **kwargs) # Persist cookies extract_cookies_to_jar(self.cookies, request, r.raw) # Redirect resolving generator. gen = self.resolve_redirects(r, request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies) # Resolve redirects if allowed. history = [resp for resp in gen] if allow_redirects else [] # Shuffle things around if there's history. if history: # Insert the first (original) request at the start history.insert(0, r) # Get the last request made r = history.pop() r.history = tuple(history) return r def get_adapter(self, url): """Returns the appropriate connnection adapter for the given URL.""" for (prefix, adapter) in self.adapters.items(): if url.startswith(prefix): return adapter # Nothing matches :-/ raise InvalidSchema("No connection adapters were found for '%s'" % url) def close(self): """Closes all adapters and as such the session""" for _, v in self.adapters.items(): v.close() def mount(self, prefix, adapter): """Registers a connection adapter to a prefix. Adapters are sorted in descending order by key length.""" self.adapters[prefix] = adapter keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] for key in keys_to_move: self.adapters[key] = self.adapters.pop(key) def __getstate__(self): return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__) def __setstate__(self, state): for attr, value in state.items(): setattr(self, attr, value) def session(): """Returns a :class:`Session` for context-management.""" return Session()
sujithshankar/anaconda
refs/heads/master
tests/glade/check_mnemonics.py
9
# # Copyright (C) 2015 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from gladecheck import GladeTest class CheckMnemonics(GladeTest): def checkGlade(self, glade_tree): """Check for widgets with keyboard accelerators but no mnemonic""" # Look for labels with use-underline=True and no mnemonic-widget for label in glade_tree.xpath(".//object[@class='GtkLabel' and ./property[@name='use_underline' and ./text() = 'True'] and not(./property[@name='mnemonic_widget'])]"): # And now filter out the cases where the label actually does have a mnemonic. # This list is not comprehensive, probably. parent = label.getparent() # Is the label the child of a GtkButton? The button might be pretty far up there. # Assume widget names that end in "Button" are subclasses of GtkButton if parent.tag == 'child' and \ label.xpath("ancestor::object[substring(@class, string-length(@class) - string-length('Button') + 1) = 'Button']"): continue # Is the label a GtkNotebook tab? if parent.tag == 'child' and parent.get('type') == 'tab' and \ parent.getparent().get('class') == 'GtkNotebook': continue raise AssertionError("Label with accelerator and no mnemonic at %s:%d" % (label.base, label.sourceline))
MediaMath/Diamond
refs/heads/master
src/collectors/openstackswiftrecon/openstackswiftrecon.py
8
# coding=utf-8 """ Openstack Swift Recon collector. Reads any present recon cache files and reports their current metrics. #### Dependencies * Running Swift services must have a recon enabled """ import os try: import json json # workaround for pyflakes issue #13 except ImportError: import simplejson as json import diamond.collector class OpenstackSwiftReconCollector(diamond.collector.Collector): def get_default_config_help(self): config_help = super(OpenstackSwiftReconCollector, self).get_default_config_help() config_help.update({ 'recon_account_cache': 'path to swift recon account cache ' '(default /var/cache/swift/account.recon)', 'recon_container_cache': 'path to swift recon container cache ' '(default /var/cache/swift/container.recon)', 'recon_object_cache': 'path to swift recon object cache ' '(default /var/cache/swift/object.recon)' }) return config_help def get_default_config(self): """ Returns the default collector settings """ config = super(OpenstackSwiftReconCollector, self).get_default_config() config.update({ 'path': 'swiftrecon', 'recon_account_cache': '/var/cache/swift/account.recon', 'recon_container_cache': '/var/cache/swift/container.recon', 'recon_object_cache': '/var/cache/swift/object.recon', 'method': 'Threaded', 'interval': 300, }) return config def _process_cache(self, d, path=()): """Recusively walk a nested recon cache dict to obtain path/values""" for k, v in d.iteritems(): if not isinstance(v, dict): self.metrics.append((path + (k,), v)) else: self._process_cache(v, path + (k,)) def collect(self): self.metrics = [] recon_cache = {'account': self.config['recon_account_cache'], 'container': self.config['recon_container_cache'], 'object': self.config['recon_object_cache']} for recon_type in recon_cache: if not os.access(recon_cache[recon_type], os.R_OK): continue try: f = open(recon_cache[recon_type]) try: rmetrics = json.loads(f.readlines()[0].strip()) self.metrics = [] self._process_cache(rmetrics) for k, v in self.metrics: metric_name = '%s.%s' % (recon_type, ".".join(k)) if isinstance(v, (int, float)): self.publish(metric_name, v) except (ValueError, IndexError): continue finally: f.close()
jcrist/blaze
refs/heads/master
blaze/expr/tests/test_method_dispatch.py
18
from blaze.expr.method_dispatch import select_functions, name, partial def iseven(n): return isinstance(n, int) and n % 2 == 0 def inc(x): return Foo(x.data + 1) def dec(x): return Foo(x.data - 1) def lower(x): return Foo(x.data.lower()) def upper(x): return Foo(x.data.upper()) def halve(x): return Foo(x.data // 2) def isnine(x): return True methods = [(int, set([inc, dec])), (str, set([lower, upper])), (iseven, set([halve])), (9, set([isnine]))] def test_select_functions(): assert select_functions(methods, 3) == {'inc': inc, 'dec': dec} assert select_functions(methods, 4) == {'inc': inc, 'dec': dec, 'halve': halve} assert select_functions(methods, 'A') == {'lower': lower, 'upper': upper} assert select_functions(methods, 9) == {'inc': inc, 'dec': dec, 'isnine': isnine} def test_name(): assert name(inc) == 'inc' assert name(partial(inc)) == name(inc)
tempbottle/Nuitka
refs/heads/develop
tests/basics/MinimalClass.py
2
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com # # Python tests originally created or extracted from other peoples work. The # parts were too small to be protected. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Very minimal class example, to be used for debugging. a = 1 class B: b = a print(B.b)
cheungpat/rq-scheduler
refs/heads/master
rq_scheduler/__init__.py
4
VERSION = (0, 5, 1) from .scheduler import Scheduler
shashank971/edx-platform
refs/heads/master
common/test/acceptance/pages/studio/settings.py
51
# coding: utf-8 """ Course Schedule and Details Settings page. """ from __future__ import unicode_literals from bok_choy.promise import EmptyPromise from .course_page import CoursePage from .utils import press_the_notification_button class SettingsPage(CoursePage): """ Course Schedule and Details Settings page. """ url_path = "settings/details" ################ # Helpers ################ def is_browser_on_page(self): return self.q(css='body.view-settings').present def refresh_and_wait_for_load(self): """ Refresh the page and wait for all resources to load. """ self.browser.refresh() self.wait_for_page() def get_elements(self, css_selector): self.wait_for_element_presence( css_selector, 'Elements matching "{}" selector are present'.format(css_selector) ) results = self.q(css=css_selector) return results def get_element(self, css_selector): results = self.get_elements(css_selector=css_selector) return results[0] if results else None ################ # Properties ################ @property def pre_requisite_course_options(self): """ Returns the pre-requisite course drop down field options. """ self.wait_for_element_visibility( '#pre-requisite-course', 'Prerequisite course element is available' ) return self.get_elements('#pre-requisite-course') @property def entrance_exam_field(self): """ Returns the enable entrance exam checkbox. """ self.wait_for_element_visibility( '#entrance-exam-enabled', 'Entrance exam checkbox is available' ) return self.get_element('#entrance-exam-enabled') @property def alert_confirmation_title(self): """ Returns the alert confirmation element, which contains text such as 'Your changes have been saved.' """ self.wait_for_element_visibility( '#alert-confirmation-title', 'Alert confirmation title element is available' ) return self.get_element('#alert-confirmation-title') @property def course_license(self): """ Property. Returns the text of the license type for the course ("All Rights Reserved" or "Creative Commons") """ license_types_css = "section.license ul.license-types li.license-type" self.wait_for_element_presence( license_types_css, "license type buttons are present", ) selected = self.q(css=license_types_css + " button.is-selected") if selected.is_present(): return selected.text[0] # Look for the license text that will be displayed by default, # if no button is yet explicitly selected license_text = self.q(css='section.license span.license-text') if license_text.is_present(): return license_text.text[0] return None @course_license.setter def course_license(self, license_name): """ Sets the course license to the given license_name (str, "All Rights Reserved" or "Creative Commons") """ license_types_css = "section.license ul.license-types li.license-type" self.wait_for_element_presence( license_types_css, "license type buttons are present", ) button_xpath = ( "//section[contains(@class, 'license')]" "//ul[contains(@class, 'license-types')]" "//li[contains(@class, 'license-type')]" "//button[contains(text(),'{license_name}')]" ).format(license_name=license_name) button = self.q(xpath=button_xpath) if not button.present: raise Exception("Invalid license name: {name}".format(name=license_name)) button.click() ################ # Waits ################ def wait_for_prerequisite_course_options(self): """ Ensure the pre_requisite_course_options dropdown selector is displayed """ EmptyPromise( lambda: self.q(css="#pre-requisite-course").present, 'Prerequisite course dropdown selector is displayed' ).fulfill() ################ # Clicks ################ ################ # Workflows ################ def require_entrance_exam(self, required=True): """ Set the entrance exam requirement via the checkbox. """ checkbox = self.entrance_exam_field selected = checkbox.is_selected() if required and not selected: checkbox.click() self.wait_for_element_visibility( '#entrance-exam-minimum-score-pct', 'Entrance exam minimum score percent is visible' ) if not required and selected: checkbox.click() self.wait_for_element_invisibility( '#entrance-exam-minimum-score-pct', 'Entrance exam minimum score percent is invisible' ) def save_changes(self, wait_for_confirmation=True): """ Clicks save button, waits for confirmation unless otherwise specified """ press_the_notification_button(self, "save") if wait_for_confirmation: self.wait_for_element_visibility( '#alert-confirmation-title', 'Save confirmation message is visible' ) def refresh_page(self, wait_for_confirmation=True): """ Reload the page. """ self.browser.refresh() if wait_for_confirmation: EmptyPromise( lambda: self.q(css='body.view-settings').present, 'Page is refreshed' ).fulfill() self.wait_for_ajax()
zycdragonball/tensorflow
refs/heads/master
tensorflow/contrib/saved_model/python/saved_model/signature_def_utils.py
113
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SignatureDef utility functions implementation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function def get_signature_def_by_key(meta_graph_def, signature_def_key): """Utility function to get a SignatureDef protocol buffer by its key. Args: meta_graph_def: MetaGraphDef protocol buffer with the SignatureDefMap to look up. signature_def_key: Key of the SignatureDef protocol buffer to find in the SignatureDefMap. Returns: A SignatureDef protocol buffer corresponding to the supplied key, if it exists. Raises: ValueError: If no entry corresponding to the supplied key is found in the SignatureDefMap of the MetaGraphDef. """ if signature_def_key not in meta_graph_def.signature_def: raise ValueError("No SignatureDef with key '%s' found in MetaGraphDef." % signature_def_key) return meta_graph_def.signature_def[signature_def_key]
ctuning/ck-env
refs/heads/master
package/tool-bazel-0.6.1-win/custom.py
2
# # Collective Knowledge workflow framework # # Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net # import os import sys import json ############################################################################## # customize installation def setup(i): """ Input: { cfg - meta of this soft entry self_cfg - meta of module soft ck_kernel - import CK kernel module (to reuse functions) host_os_uoa - host OS UOA host_os_uid - host OS UID host_os_dict - host OS meta target_os_uoa - target OS UOA target_os_uid - target OS UID target_os_dict - target OS meta target_device_id - target device ID (if via ADB) tags - list of tags used to search this entry env - updated environment vars from meta customize - updated customize vars from meta deps - resolved dependencies for this soft interactive - if 'yes', can ask questions, otherwise quiet path - path to entry (with scripts) install_path - installation path } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 (install_env) - prepare environment to be used before the install script } """ import os import shutil # Get variables o=i.get('out','') ck=i['ck_kernel'] hos=i['host_os_uoa'] tos=i['target_os_uoa'] hosd=i['host_os_dict'] tosd=i['target_os_dict'] hbits=hosd.get('bits','') tbits=tosd.get('bits','') hname=hosd.get('ck_name','') # win, linux hname2=hosd.get('ck_name2','') # win, mingw, linux, android macos=hosd.get('macos','') # yes/no hft=i.get('features',{}) # host platform features habi=hft.get('os',{}).get('abi','') # host ABI (only for ARM-based); if you want to get target ABI, use tosd ... # armv7l, etc... p=i['path'] env=i['env'] pi=i.get('install_path','') cus=i['customize'] ie=cus.get('install_env',{}) nie={} # new env # Update vars f=ie['PACKAGE_NAME_TEMPLATE'] if macos=='yes': if hbits!='64': return {'return':1, 'error':'this package doesn\'t support non 64-bit MacOS'} f+='installer-darwin-x86_64.sh' nie['PACKAGE_SKIP_LINUX_MAKE']='YES' nie['PACKAGE_RUN']='YES' nie['PACKAGE_CMD']='--prefix='+pi elif hname=='win': if hbits!='64': return {'return':1, 'error':'this package doesn\'t support non 64-bit Windows'} f+='windows-x86_64.zip' nie['PACKAGE_WGET_EXTRA']=ie['PACKAGE_WGET_EXTRA']+' -O '+f nie['PACKAGE_UNZIP']='YES' else: if hbits!='64': return {'return':1, 'error':'this package doesn\'t support non 64-bit Linux'} f+='installer-linux-x86_64.sh' nie['PACKAGE_SKIP_LINUX_MAKE']='YES' nie['PACKAGE_RUN']='YES' nie['PACKAGE_CMD']='--prefix='+pi nie['PACKAGE_NAME']=f # nie['PACKAGE_WGET_EXTRA']=ie['PACKAGE_WGET_EXTRA']+' -O '+f return {'return':0, 'install_env':nie}
vadimtk/chrome4sdp
refs/heads/master
build/gyp_chromium_test.py
27
#!/usr/bin/env python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import sys import unittest SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) SRC_DIR = os.path.dirname(SCRIPT_DIR) sys.path.append(os.path.join(SRC_DIR, 'third_party', 'pymock')) import mock import gyp_chromium class TestGetOutputDirectory(unittest.TestCase): @mock.patch('os.environ', {}) @mock.patch('sys.argv', [__file__]) def testDefaultValue(self): self.assertEqual(gyp_chromium.GetOutputDirectory(), 'out') @mock.patch('os.environ', {'GYP_GENERATOR_FLAGS': 'output_dir=envfoo'}) @mock.patch('sys.argv', [__file__]) def testEnvironment(self): self.assertEqual(gyp_chromium.GetOutputDirectory(), 'envfoo') @mock.patch('os.environ', {'GYP_GENERATOR_FLAGS': 'output_dir=envfoo'}) @mock.patch('sys.argv', [__file__, '-Goutput_dir=cmdfoo']) def testGFlagOverridesEnv(self): self.assertEqual(gyp_chromium.GetOutputDirectory(), 'cmdfoo') @mock.patch('os.environ', {}) @mock.patch('sys.argv', [__file__, '-G', 'output_dir=foo']) def testGFlagWithSpace(self): self.assertEqual(gyp_chromium.GetOutputDirectory(), 'foo') class TestGetGypVars(unittest.TestCase): @mock.patch('os.environ', {}) def testDefault(self): self.assertEqual(gyp_chromium.GetGypVars([]), {}) @mock.patch('os.environ', {}) @mock.patch('sys.argv', [__file__, '-D', 'foo=bar']) def testDFlags(self): self.assertEqual(gyp_chromium.GetGypVars([]), {'foo': 'bar'}) @mock.patch('os.environ', {}) @mock.patch('sys.argv', [__file__, '-D', 'foo']) def testDFlagsNoValue(self): self.assertEqual(gyp_chromium.GetGypVars([]), {'foo': '1'}) @mock.patch('os.environ', {}) @mock.patch('sys.argv', [__file__, '-D', 'foo=bar', '-Dbaz']) def testDFlagMulti(self): self.assertEqual(gyp_chromium.GetGypVars([]), {'foo': 'bar', 'baz': '1'}) if __name__ == '__main__': unittest.main()
aperigault/ansible
refs/heads/devel
lib/ansible/modules/system/debconf.py
44
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2014, Brian Coca <briancoca+ansible@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = r''' --- module: debconf short_description: Configure a .deb package description: - Configure a .deb package using debconf-set-selections. - Or just query existing selections. version_added: "1.6" notes: - This module requires the command line debconf tools. - A number of questions have to be answered (depending on the package). Use 'debconf-show <package>' on any Debian or derivative with the package installed to see questions/settings available. - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords. requirements: - debconf - debconf-utils options: name: description: - Name of package to configure. type: str required: true aliases: [ pkg ] question: description: - A debconf configuration setting. type: str aliases: [ selection, setting ] vtype: description: - The type of the value supplied. - C(seen) was added in Ansible 2.2. type: str choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ] value: description: - Value to set the configuration to. type: str aliases: [ answer ] unseen: description: - Do not set 'seen' flag when pre-seeding. type: bool default: no author: - Brian Coca (@bcoca) ''' EXAMPLES = r''' - name: Set default locale to fr_FR.UTF-8 debconf: name: locales question: locales/default_environment_locale value: fr_FR.UTF-8 vtype: select - name: set to generate locales debconf: name: locales question: locales/locales_to_be_generated value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8 vtype: multiselect - name: Accept oracle license debconf: name: oracle-java7-installer question: shared/accepted-oracle-license-v1-1 value: 'true' vtype: select - name: Specifying package you can register/return the list of questions and current values debconf: name: tzdata ''' from ansible.module_utils._text import to_text from ansible.module_utils.basic import AnsibleModule def get_selections(module, pkg): cmd = [module.get_bin_path('debconf-show', True), pkg] rc, out, err = module.run_command(' '.join(cmd)) if rc != 0: module.fail_json(msg=err) selections = {} for line in out.splitlines(): (key, value) = line.split(':', 1) selections[key.strip('*').strip()] = value.strip() return selections def set_selection(module, pkg, question, vtype, value, unseen): setsel = module.get_bin_path('debconf-set-selections', True) cmd = [setsel] if unseen: cmd.append('-u') if vtype == 'boolean': if value == 'True': value = 'true' elif value == 'False': value = 'false' data = ' '.join([pkg, question, vtype, value]) return module.run_command(cmd, data=data) def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True, aliases=['pkg']), question=dict(type='str', aliases=['selection', 'setting']), vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']), value=dict(type='str', aliases=['answer']), unseen=dict(type='bool'), ), required_together=(['question', 'vtype', 'value'],), supports_check_mode=True, ) # TODO: enable passing array of options and/or debconf file from get-selections dump pkg = module.params["name"] question = module.params["question"] vtype = module.params["vtype"] value = module.params["value"] unseen = module.params["unseen"] prev = get_selections(module, pkg) changed = False msg = "" if question is not None: if vtype is None or value is None: module.fail_json(msg="when supplying a question you must supply a valid vtype and value") # if question doesn't exist, value cannot match if question not in prev: changed = True else: existing = prev[question] # ensure we compare booleans supplied to the way debconf sees them (true/false strings) if vtype == 'boolean': value = to_text(value).lower() existing = to_text(prev[question]).lower() if value != existing: changed = True if changed: if not module.check_mode: rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen) if rc: module.fail_json(msg=e) curr = {question: value} if question in prev: prev = {question: prev[question]} else: prev[question] = '' if module._diff: after = prev.copy() after.update(curr) diff_dict = {'before': prev, 'after': after} else: diff_dict = {} module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict) module.exit_json(changed=changed, msg=msg, current=prev) if __name__ == '__main__': main()
jmesteve/medical
refs/heads/master
openerp/tools/import_email.py
105
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import os, sys import re import smtplib import email, mimetypes from email.Header import decode_header from email.MIMEText import MIMEText import xmlrpclib warn_msg = """ Bonjour, Le message avec le sujet "%s" n'a pu être archivé dans l'ERP. """.decode('utf-8') class EmailParser(object): def __init__(self, headers, dispatcher): self.headers = headers self.dispatcher = dispatcher def parse(self, msg): dispatcher((self.headers, msg)) class CommandDispatcher(object): def __init__(self, receiver): self.receiver = receiver def __call__(self, request): return self.receiver(request) class RPCProxy(object): def __init__(self, uid, passwd, host='localhost', port=8069, path='object'): self.rpc = xmlrpclib.ServerProxy('http://%s:%s/%s' % (host, port, path)) self.user_id = uid self.passwd = passwd def __call__(self, request): return self.rpc.execute(self.user_id, self.passwd, *request) class ReceiverEmail2Event(object): email_re = re.compile(r""" ([a-zA-Z][\w\.-]*[a-zA-Z0-9] # username part @ # mandatory @ sign [a-zA-Z0-9][\w\.-]* # domain must start with a letter \. [a-z]{2,3} # TLD ) """, re.VERBOSE) project_re = re.compile(r"^ *\[?(\d{4}\.?\d{0,3})\]?", re.UNICODE) def __init__(self, rpc): self.rpc = rpc def get_addresses(self, headers, msg): hcontent = '' for header in [h for h in headers if msg.has_key(h)]: hcontent += msg[header] return self.email_re.findall(hcontent) def get_partners(self, headers, msg): alladdresses = self.get_addresses(headers, msg) address_ids = self.rpc(('res.partner', 'search', [('email', 'in', alladdresses)])) addresses = self.rpc(('res.partner', 'read', address_ids)) return [x['partner_id'][0] for x in addresses] def __call__(self, request): headers, msg = request partners = self.get_partners(headers, msg) subject = u'' for string, charset in decode_header(msg['Subject']): if charset: subject += string.decode(charset) else: subject += unicode(string) if partners: self.save_mail(msg, subject, partners) else: warning = MIMEText((warn_msg % (subject,)).encode('utf-8'), 'plain', 'utf-8') warning['Subject'] = 'Message de OpenERP' warning['From'] = 'erp@steel-sa.com' warning['To'] = msg['From'] s = smtplib.SMTP() s.connect() s.sendmail('erp@steel-sa.com', self.email_re.findall(msg['From']), warning.as_string()) s.close() if msg.is_multipart(): for message in [m for m in msg.get_payload() if m.get_content_type() == 'message/rfc822']: self((headers, message.get_payload()[0])) def save_mail(self, msg, subject, partners): counter, description = 1, u'' if msg.is_multipart(): for part in msg.get_payload(): stockdir = os.path.join('emails', msg['Message-Id'][1:-1]) newdir = os.path.join('/tmp', stockdir) filename = part.get_filename() if not filename: ext = mimetypes.guess_extension(part.get_type()) if not ext: ext = '.bin' filename = 'part-%03d%s' % (counter, ext) if part.get_content_maintype() == 'multipart': continue elif part.get_content_maintype() == 'text': if part.get_content_subtype() == 'plain': description += part.get_payload(decode=1).decode(part.get_charsets()[0]) description += u'\n\nVous trouverez les éventuels fichiers dans le répertoire: %s' % stockdir continue else: description += u'\n\nCe message est en "%s", vous trouverez ce texte dans le répertoire: %s' % (part.get_content_type(), stockdir) elif part.get_content_type() == 'message/rfc822': continue if not os.path.isdir(newdir): os.mkdir(newdir) counter += 1 fd = file(os.path.join(newdir, filename), 'w') fd.write(part.get_payload(decode=1)) fd.close() else: description = msg.get_payload(decode=1).decode(msg.get_charsets()[0]) project = self.project_re.search(subject) if project: project = project.groups()[0] else: project = '' for partner in partners: self.rpc(('res.partner.event', 'create', {'name' : subject, 'partner_id' : partner, 'description' : description, 'project' : project})) if __name__ == '__main__': rpc_dispatcher = CommandDispatcher(RPCProxy(4, 'admin')) dispatcher = CommandDispatcher(ReceiverEmail2Event(rpc_dispatcher)) parser = EmailParser(['To', 'Cc', 'From'], dispatcher) parser.parse(email.message_from_file(sys.stdin)) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
groschovskiy/gsa-admin-toolkit
refs/heads/master
sso_test.py
3
#!/usr/bin/python2.4 # # Copyright (C) 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for SSO server. The SSO server must be running on localhost port 8080. We test the following conditions: with right cookies without auth cookie bad cookie test_cookie_path test_cookie_path enabled disabled enabled disabled /public x x /secure x x x x /login x x x /form x x x /obrareq x """ __author__ = 'jlowry@google.com (John Lowry)' import getopt import urllib import urllib2 import cookielib import sys import unittest import sso class CustomRedirectHandler(urllib2.HTTPRedirectHandler): def http_error_302(self, req, fp, code, msg, headers): return None def http_error_303(self, req, fp, code, msg, headers): return None class Error(Exception): """Base exception class for this module.""" pass class SsoUnitTestException(Error): """Exception thrown when running the unit tests..""" pass class SsoUnitTest(unittest.TestCase): url_prefix = "http://localhost:8080" login = "test1" cookie_domain = ".foo.com" search_host = "search.foo.com" def setUp(self): url = "%s/testcookiepath" % (SsoUnitTest.url_prefix) f = urllib2.urlopen(url) val = f.read() if val.find("True") > -1: self.test_cookie_path = 1 else: self.test_cookie_path = 0 url = "%s/set_cookie_domain" % (SsoUnitTest.url_prefix) f = urllib2.urlopen(url) self.cookie_domain = f.read() url = "%s/set_search_host" % (SsoUnitTest.url_prefix) f = urllib2.urlopen(url) self.search_host = f.read() def tearDown(self): url = "%s/testcookiepath?value=%s" % (SsoUnitTest.url_prefix, self.test_cookie_path) f = urllib2.urlopen(url) url = "%s/set_cookie_domain?value=%s" % (SsoUnitTest.url_prefix, self.cookie_domain) f = urllib2.urlopen(url) url = "%s/set_search_host?value=%s" % (SsoUnitTest.url_prefix, self.search_host) f = urllib2.urlopen(url) def setCookieTest(self, value): """Method for switching test_cookie_path on and off.""" url = "%s/testcookiepath?value=%s" % (SsoUnitTest.url_prefix, value) f = urllib2.urlopen(url) val = f.read() if value: self.assert_("""val.find("True") > -1""") else: self.assert_("""val.find("True") == -1""") def setCookieDomain(self, value): url = "%s/set_cookie_domain?value=%s" % (SsoUnitTest.url_prefix, value) f = urllib2.urlopen(url) val = f.read() self.assertEqual(val, value) def setSearchHost(self, value): url = "%s/set_search_host?value=%s" % (SsoUnitTest.url_prefix, value) f = urllib2.urlopen(url) self.cookie_domain = f.read() self.assertEqual(self.cookie_domain, value) def getPublic(self): """Method for getting a public URL.""" url = "%s/public" % (SsoUnitTest.url_prefix) f = urllib2.urlopen(url) self.assert_("f.code() == 200") self.assertEqual(f.geturl(), url) def testGetPublicCookiePathEnabled(self): """Get a public URL with test_cookie_path enabled.""" self.setCookieTest(0) self.getPublic() def testGetPublicCookiePathDisabled(self): """Get a public URL with test_cookie_path disabled.""" self.setCookieTest(1) self.getPublic() def getSecureWithAuth(self): """Method for getting a secure URL with the authentication cookies.""" url = "%s/secure" % (SsoUnitTest.url_prefix) req = urllib2.Request(url) req.add_header("Cookie", "%s=%s;" % (sso.SSO_COOKIE, SsoUnitTest.login)) f = urllib2.urlopen(req) self.assert_("f.code() == 200") self.assertEqual(f.geturl(), url) def testGetSecureWithAuthCookiePathEnabled(self): """Get a secure URL with a cookie and with test_cookie_path enabled.""" self.setCookieTest(0) self.getSecureWithAuth() def testGetSecureWithAuthCookiePathDisabled(self): """Get a secure URL with a cookie and with test_cookie_path disabled.""" self.setCookieTest(1) self.getSecureWithAuth() def getSecureWithoutAuth(self, script): """Method for getting a secure URL without the authentication cookies.""" url = "%s/secure" % (SsoUnitTest.url_prefix) opener = urllib2.build_opener(CustomRedirectHandler()) try: f = opener.open(url) raise SsoUnitTestException("secure page did not redirect to login form") except urllib2.HTTPError, e: self.assert_(e.code == 302 or e.code == 303) self.assertEqual(e.headers["location"], "%s/%s?path=secure" % (SsoUnitTest.url_prefix, script)) self.assert_(e.headers.has_key("set-cookie") == False) def testGetSecureWithoutAuthCookiePathDisabled(self): """Get a secure URL without a cookie and with test_cookie_path disabled. Should redirect to /secure without setting any cookies.""" self.setCookieTest(0) self.getSecureWithoutAuth("form") def testGetSecureWithoutAuthCookiePathEnabled(self): """Get a secure URL without a cookie and with test_cookie_path enabled. Should redirect to /obrareq without setting any cookies.""" self.setCookieTest(1) self.getSecureWithoutAuth("obrareq") def testGetObrareq(self): """Method for getting a secure URL without the authentication cookies. Should redirect to the login form and set FORM_COOKIE with a path of /login.""" self.setCookieTest(0) url = "%s/obrareq?path=%s" % (SsoUnitTest.url_prefix, "secure") cj = cookielib.CookieJar() opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj), CustomRedirectHandler()) try: f = opener.open(url) raise SsoUnitTestException("obrareq did not redirect to login form") except urllib2.HTTPError, e: self.assertEqual(e.code, 302) self.assertEqual(e.headers["location"], "%s/form?path=%s" % (SsoUnitTest.url_prefix, "secure")) self.assertEqual(e.headers["set-cookie"], "%s=%s; Path=/login;" % (sso.FORM_COOKIE, "1")) def getForm(self, test_cookie_path): """Method for getting the login form.""" url = "%s/form?path=secure" % (SsoUnitTest.url_prefix) f = urllib2.urlopen(url) self.assert_("f.code() == 200") self.assertEqual(f.geturl(), url) if test_cookie_path: self.assertEqual(f.headers["set-cookie"], "%s=%s;" % (sso.SSO_COOKIE, "1")) def testGetFormWithBadcookie(self): """Method for getting the login form with a bad cookie.""" self.setCookieTest(1) url = "%s/form?path=secure" % (SsoUnitTest.url_prefix) req = urllib2.Request(url) req.add_header("Cookie", "%s=%s;" % (sso.FORM_COOKIE, "1")) opener = urllib2.build_opener(CustomRedirectHandler()) try: f = opener.open(req) raise SsoUnitTestException("form did not redirect when getting bad cookie") except urllib2.HTTPError, e: self.assertEqual(e.code, 302) self.assertEqual(e.headers["location"], "%s/login?msg=%s" % (SsoUnitTest.url_prefix, "bad_cookie")) def testGetLoginPageWithError(self): """Method for posting the login form with an error message. Expect to get back a 200 status code with an error in the HTML body and no SSO_COOKIE.""" self.setCookieTest(1) url = "%s/login?msg=%s" % (SsoUnitTest.url_prefix, "bad_cookie") params = urllib.urlencode({ "login":SsoUnitTest.login, "password":"test1", "path":"secure" }) req = urllib2.Request(url) req.add_header("Cookie", "%s=%s;" % (sso.FORM_COOKIE, "1")) req.add_data(params) opener = urllib2.build_opener(CustomRedirectHandler()) f = opener.open(req) self.assert_("f.code() == 200") self.assertEqual(f.geturl(), url) self.assert_(f.headers.has_key("set-cookie") == False) def testGetFormCookiePathEnabled(self): self.setCookieTest(1) self.getForm(True) def testGetFormCookiePathDisabled(self): self.setCookieTest(0) self.getForm(False) def testPostFormCookiePathDisabled(self): self.setCookieTest(0) self.setCookieDomain(SsoUnitTest.cookie_domain) url = "%s/login" % (SsoUnitTest.url_prefix) params = urllib.urlencode({ "login":SsoUnitTest.login, "password":"test1", "path":"secure" }) opener = urllib2.build_opener(CustomRedirectHandler()) try: f = opener.open(url, params) raise SsoUnitTestException("login form did not redirect after submit") except urllib2.HTTPError, e: self.assertEqual(e.code, 302) self.assertEqual(e.headers["location"], "%s/secure" % (SsoUnitTest.url_prefix)) if self.cookie_domain: self.assertEqual(e.headers["set-cookie"], "%s=%s; Domain=%s;" % (sso.SSO_COOKIE, SsoUnitTest.login, SsoUnitTest.cookie_domain)) else: self.assertEqual(e.headers["set-cookie"], "%s=%s" % (sso.SSO_COOKIE, SsoUnitTest.login)) def testPostFormCookiePathEnabled(self): self.setCookieTest(1) self.setCookieDomain(SsoUnitTest.cookie_domain) url = "%s/login" % (SsoUnitTest.url_prefix) params = urllib.urlencode({ "login":SsoUnitTest.login, "password":"test1", "path":"secure" }) req = urllib2.Request(url) req.add_header("Cookie", "%s=%s; %s=%s;" % (sso.SSO_COOKIE, "1", sso.FORM_COOKIE, "1")) req.add_data(params) opener = urllib2.build_opener(CustomRedirectHandler()) try: f = opener.open(req) raise SsoUnitTestException("login form did not redirect after submit") except urllib2.HTTPError, e: self.assertEqual(e.code, 302) self.assertEqual(e.headers["location"], "%s/secure" % (SsoUnitTest.url_prefix)) if self.cookie_domain: self.assertEqual(e.headers["set-cookie"], "%s=%s; Domain=%s;" % (sso.SSO_COOKIE, SsoUnitTest.login, SsoUnitTest.cookie_domain)) else: self.assertEqual(e.headers["set-cookie"], "%s=%s" % (sso.SSO_COOKIE, SsoUnitTest.login)) def testExternalLoginWithAuth(self): """Method for getting the External Login URL with the authentication cookies.""" self.setCookieTest(0) self.setCookieDomain(SsoUnitTest.cookie_domain) self.setSearchHost(SsoUnitTest.search_host) url = "%s/externalLogin?returnPath=/secure" % (SsoUnitTest.url_prefix) req = urllib2.Request(url) req.add_header("Cookie", "%s=%s;" % (sso.SSO_COOKIE, SsoUnitTest.login)) # req.add_header("Referer", "%s/" % (SsoUnitTest.url_prefix)) opener = urllib2.build_opener(CustomRedirectHandler()) try: f = opener.open(req) raise SsoUnitTestException("External Login did not redirect when presented with valid cookie") except urllib2.HTTPError, e: self.assertEqual(e.code, 302) self.assertEqual(e.headers["location"], "http://%s/secure" % (SsoUnitTest.search_host)) def testExternalLoginWithoutAuth(self): """Method for getting External Login URL without the authentication cookies.""" self.setCookieTest(0) self.setCookieDomain(SsoUnitTest.cookie_domain) self.setSearchHost(SsoUnitTest.search_host) url = "%s/externalLogin?returnPath=/secure" % (SsoUnitTest.url_prefix) req = urllib2.Request(url) # req.add_header("Referer", "%s/" % (SsoUnitTest.url_prefix)) opener = urllib2.build_opener(CustomRedirectHandler()) try: f = opener.open(req) raise SsoUnitTestException("External Login did not redirect to login form") except urllib2.HTTPError, e: self.assert_(e.code, 302) self.assertEqual(e.headers["location"], "%s/form?path=http%%3A//%s/secure" % (SsoUnitTest.url_prefix, urllib.quote(SsoUnitTest.search_host))) self.assert_(e.headers.has_key("set-cookie") == False) def main(argv): pass if __name__ == '__main__': unittest.main()
xbmc/atv2
refs/heads/atv2
xbmc/lib/libPython/Python/Mac/Modules/qt/setup.py
39
# This is a temporary setup script to allow distribution of # MacPython 2.4 modules for MacPython 2.3. from distutils.core import Extension, setup setup(name="QuickTime", version="0.2", ext_modules=[ Extension('QuickTime._Qt', ['_Qtmodule.c'], extra_link_args=['-framework', 'Carbon', '-framework', 'QuickTime']) ], py_modules=['QuickTime.Qt', 'QuickTime.QuickTime'], package_dir={'QuickTime':'../../../Lib/plat-mac/Carbon'} )
pottzer/home-assistant
refs/heads/dev
homeassistant/helpers/state.py
5
""" homeassistant.helpers.state ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Helpers that help with state related things. """ import logging from homeassistant.core import State import homeassistant.util.dt as dt_util from homeassistant.const import ( STATE_ON, STATE_OFF, SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_MEDIA_PLAY, SERVICE_MEDIA_PAUSE, STATE_PLAYING, STATE_PAUSED, ATTR_ENTITY_ID) from homeassistant.components.media_player import (SERVICE_PLAY_MEDIA) _LOGGER = logging.getLogger(__name__) # pylint: disable=too-few-public-methods, attribute-defined-outside-init class TrackStates(object): """ Records the time when the with-block is entered. Will add all states that have changed since the start time to the return list when with-block is exited. """ def __init__(self, hass): self.hass = hass self.states = [] def __enter__(self): self.now = dt_util.utcnow() return self.states def __exit__(self, exc_type, exc_value, traceback): self.states.extend(get_changed_since(self.hass.states.all(), self.now)) def get_changed_since(states, utc_point_in_time): """ Returns all states that have been changed since utc_point_in_time. """ point_in_time = dt_util.strip_microseconds(utc_point_in_time) return [state for state in states if state.last_updated >= point_in_time] def reproduce_state(hass, states, blocking=False): """ Takes in a state and will try to have the entity reproduce it. """ if isinstance(states, State): states = [states] for state in states: current_state = hass.states.get(state.entity_id) if current_state is None: _LOGGER.warning('reproduce_state: Unable to find entity %s', state.entity_id) continue if state.domain == 'media_player' and state.attributes and \ 'media_type' in state.attributes and \ 'media_id' in state.attributes: service = SERVICE_PLAY_MEDIA elif state.domain == 'media_player' and state.state == STATE_PAUSED: service = SERVICE_MEDIA_PAUSE elif state.domain == 'media_player' and state.state == STATE_PLAYING: service = SERVICE_MEDIA_PLAY elif state.state == STATE_ON: service = SERVICE_TURN_ON elif state.state == STATE_OFF: service = SERVICE_TURN_OFF else: _LOGGER.warning("reproduce_state: Unable to reproduce state %s", state) continue service_data = dict(state.attributes) service_data[ATTR_ENTITY_ID] = state.entity_id hass.services.call(state.domain, service, service_data, blocking)
AlexandreProenca/django-elasticsearch
refs/heads/master
test_project/test_project/wsgi.py
73
""" WSGI config for test_project project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
cynngah/uofthacksIV
refs/heads/master
generate-jobs/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
422
#------------------------------------------------------------------- # tarfile.py #------------------------------------------------------------------- # Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de> # All rights reserved. # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # from __future__ import print_function """Read from and write to tar format archives. """ __version__ = "$Revision$" version = "0.9.0" __author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" __date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" __cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" __credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." #--------- # Imports #--------- import sys import os import stat import errno import time import struct import copy import re try: import grp, pwd except ImportError: grp = pwd = None # os.symlink on Windows prior to 6.0 raises NotImplementedError symlink_exception = (AttributeError, NotImplementedError) try: # WindowsError (1314) will be raised if the caller does not hold the # SeCreateSymbolicLinkPrivilege privilege symlink_exception += (WindowsError,) except NameError: pass # from tarfile import * __all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] if sys.version_info[0] < 3: import __builtin__ as builtins else: import builtins _open = builtins.open # Since 'open' is TarFile.open #--------------------------------------------------------- # tar constants #--------------------------------------------------------- NUL = b"\0" # the null character BLOCKSIZE = 512 # length of processing blocks RECORDSIZE = BLOCKSIZE * 20 # length of records GNU_MAGIC = b"ustar \0" # magic gnu tar string POSIX_MAGIC = b"ustar\x0000" # magic posix tar string LENGTH_NAME = 100 # maximum length of a filename LENGTH_LINK = 100 # maximum length of a linkname LENGTH_PREFIX = 155 # maximum length of the prefix field REGTYPE = b"0" # regular file AREGTYPE = b"\0" # regular file LNKTYPE = b"1" # link (inside tarfile) SYMTYPE = b"2" # symbolic link CHRTYPE = b"3" # character special device BLKTYPE = b"4" # block special device DIRTYPE = b"5" # directory FIFOTYPE = b"6" # fifo special device CONTTYPE = b"7" # contiguous file GNUTYPE_LONGNAME = b"L" # GNU tar longname GNUTYPE_LONGLINK = b"K" # GNU tar longlink GNUTYPE_SPARSE = b"S" # GNU tar sparse file XHDTYPE = b"x" # POSIX.1-2001 extended header XGLTYPE = b"g" # POSIX.1-2001 global header SOLARIS_XHDTYPE = b"X" # Solaris extended header USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format GNU_FORMAT = 1 # GNU tar format PAX_FORMAT = 2 # POSIX.1-2001 (pax) format DEFAULT_FORMAT = GNU_FORMAT #--------------------------------------------------------- # tarfile constants #--------------------------------------------------------- # File types that tarfile supports: SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) # File types that will be treated as a regular file. REGULAR_TYPES = (REGTYPE, AREGTYPE, CONTTYPE, GNUTYPE_SPARSE) # File types that are part of the GNU tar format. GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) # Fields from a pax header that override a TarInfo attribute. PAX_FIELDS = ("path", "linkpath", "size", "mtime", "uid", "gid", "uname", "gname") # Fields from a pax header that are affected by hdrcharset. PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) # Fields in a pax header that are numbers, all other fields # are treated as strings. PAX_NUMBER_FIELDS = { "atime": float, "ctime": float, "mtime": float, "uid": int, "gid": int, "size": int } #--------------------------------------------------------- # Bits used in the mode field, values in octal. #--------------------------------------------------------- S_IFLNK = 0o120000 # symbolic link S_IFREG = 0o100000 # regular file S_IFBLK = 0o060000 # block device S_IFDIR = 0o040000 # directory S_IFCHR = 0o020000 # character device S_IFIFO = 0o010000 # fifo TSUID = 0o4000 # set UID on execution TSGID = 0o2000 # set GID on execution TSVTX = 0o1000 # reserved TUREAD = 0o400 # read by owner TUWRITE = 0o200 # write by owner TUEXEC = 0o100 # execute/search by owner TGREAD = 0o040 # read by group TGWRITE = 0o020 # write by group TGEXEC = 0o010 # execute/search by group TOREAD = 0o004 # read by other TOWRITE = 0o002 # write by other TOEXEC = 0o001 # execute/search by other #--------------------------------------------------------- # initialization #--------------------------------------------------------- if os.name in ("nt", "ce"): ENCODING = "utf-8" else: ENCODING = sys.getfilesystemencoding() #--------------------------------------------------------- # Some useful functions #--------------------------------------------------------- def stn(s, length, encoding, errors): """Convert a string to a null-terminated bytes object. """ s = s.encode(encoding, errors) return s[:length] + (length - len(s)) * NUL def nts(s, encoding, errors): """Convert a null-terminated bytes object to a string. """ p = s.find(b"\0") if p != -1: s = s[:p] return s.decode(encoding, errors) def nti(s): """Convert a number field to a python number. """ # There are two possible encodings for a number field, see # itn() below. if s[0] != chr(0o200): try: n = int(nts(s, "ascii", "strict") or "0", 8) except ValueError: raise InvalidHeaderError("invalid header") else: n = 0 for i in range(len(s) - 1): n <<= 8 n += ord(s[i + 1]) return n def itn(n, digits=8, format=DEFAULT_FORMAT): """Convert a python number to a number field. """ # POSIX 1003.1-1988 requires numbers to be encoded as a string of # octal digits followed by a null-byte, this allows values up to # (8**(digits-1))-1. GNU tar allows storing numbers greater than # that if necessary. A leading 0o200 byte indicates this particular # encoding, the following digits-1 bytes are a big-endian # representation. This allows values up to (256**(digits-1))-1. if 0 <= n < 8 ** (digits - 1): s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL else: if format != GNU_FORMAT or n >= 256 ** (digits - 1): raise ValueError("overflow in number field") if n < 0: # XXX We mimic GNU tar's behaviour with negative numbers, # this could raise OverflowError. n = struct.unpack("L", struct.pack("l", n))[0] s = bytearray() for i in range(digits - 1): s.insert(0, n & 0o377) n >>= 8 s.insert(0, 0o200) return s def calc_chksums(buf): """Calculate the checksum for a member's header by summing up all characters except for the chksum field which is treated as if it was filled with spaces. According to the GNU tar sources, some tars (Sun and NeXT) calculate chksum with signed char, which will be different if there are chars in the buffer with the high bit set. So we calculate two checksums, unsigned and signed. """ unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) return unsigned_chksum, signed_chksum def copyfileobj(src, dst, length=None): """Copy length bytes from fileobj src to fileobj dst. If length is None, copy the entire content. """ if length == 0: return if length is None: while True: buf = src.read(16*1024) if not buf: break dst.write(buf) return BUFSIZE = 16 * 1024 blocks, remainder = divmod(length, BUFSIZE) for b in range(blocks): buf = src.read(BUFSIZE) if len(buf) < BUFSIZE: raise IOError("end of file reached") dst.write(buf) if remainder != 0: buf = src.read(remainder) if len(buf) < remainder: raise IOError("end of file reached") dst.write(buf) return filemode_table = ( ((S_IFLNK, "l"), (S_IFREG, "-"), (S_IFBLK, "b"), (S_IFDIR, "d"), (S_IFCHR, "c"), (S_IFIFO, "p")), ((TUREAD, "r"),), ((TUWRITE, "w"),), ((TUEXEC|TSUID, "s"), (TSUID, "S"), (TUEXEC, "x")), ((TGREAD, "r"),), ((TGWRITE, "w"),), ((TGEXEC|TSGID, "s"), (TSGID, "S"), (TGEXEC, "x")), ((TOREAD, "r"),), ((TOWRITE, "w"),), ((TOEXEC|TSVTX, "t"), (TSVTX, "T"), (TOEXEC, "x")) ) def filemode(mode): """Convert a file's mode to a string of the form -rwxrwxrwx. Used by TarFile.list() """ perm = [] for table in filemode_table: for bit, char in table: if mode & bit == bit: perm.append(char) break else: perm.append("-") return "".join(perm) class TarError(Exception): """Base exception.""" pass class ExtractError(TarError): """General exception for extract errors.""" pass class ReadError(TarError): """Exception for unreadable tar archives.""" pass class CompressionError(TarError): """Exception for unavailable compression methods.""" pass class StreamError(TarError): """Exception for unsupported operations on stream-like TarFiles.""" pass class HeaderError(TarError): """Base exception for header errors.""" pass class EmptyHeaderError(HeaderError): """Exception for empty headers.""" pass class TruncatedHeaderError(HeaderError): """Exception for truncated headers.""" pass class EOFHeaderError(HeaderError): """Exception for end of file headers.""" pass class InvalidHeaderError(HeaderError): """Exception for invalid headers.""" pass class SubsequentHeaderError(HeaderError): """Exception for missing and invalid extended headers.""" pass #--------------------------- # internal stream interface #--------------------------- class _LowLevelFile(object): """Low-level file object. Supports reading and writing. It is used instead of a regular file object for streaming access. """ def __init__(self, name, mode): mode = { "r": os.O_RDONLY, "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, }[mode] if hasattr(os, "O_BINARY"): mode |= os.O_BINARY self.fd = os.open(name, mode, 0o666) def close(self): os.close(self.fd) def read(self, size): return os.read(self.fd, size) def write(self, s): os.write(self.fd, s) class _Stream(object): """Class that serves as an adapter between TarFile and a stream-like object. The stream-like object only needs to have a read() or write() method and is accessed blockwise. Use of gzip or bzip2 compression is possible. A stream-like object could be for example: sys.stdin, sys.stdout, a socket, a tape device etc. _Stream is intended to be used only internally. """ def __init__(self, name, mode, comptype, fileobj, bufsize): """Construct a _Stream object. """ self._extfileobj = True if fileobj is None: fileobj = _LowLevelFile(name, mode) self._extfileobj = False if comptype == '*': # Enable transparent compression detection for the # stream interface fileobj = _StreamProxy(fileobj) comptype = fileobj.getcomptype() self.name = name or "" self.mode = mode self.comptype = comptype self.fileobj = fileobj self.bufsize = bufsize self.buf = b"" self.pos = 0 self.closed = False try: if comptype == "gz": try: import zlib except ImportError: raise CompressionError("zlib module is not available") self.zlib = zlib self.crc = zlib.crc32(b"") if mode == "r": self._init_read_gz() else: self._init_write_gz() if comptype == "bz2": try: import bz2 except ImportError: raise CompressionError("bz2 module is not available") if mode == "r": self.dbuf = b"" self.cmp = bz2.BZ2Decompressor() else: self.cmp = bz2.BZ2Compressor() except: if not self._extfileobj: self.fileobj.close() self.closed = True raise def __del__(self): if hasattr(self, "closed") and not self.closed: self.close() def _init_write_gz(self): """Initialize for writing with gzip compression. """ self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0) timestamp = struct.pack("<L", int(time.time())) self.__write(b"\037\213\010\010" + timestamp + b"\002\377") if self.name.endswith(".gz"): self.name = self.name[:-3] # RFC1952 says we must use ISO-8859-1 for the FNAME field. self.__write(self.name.encode("iso-8859-1", "replace") + NUL) def write(self, s): """Write string s to the stream. """ if self.comptype == "gz": self.crc = self.zlib.crc32(s, self.crc) self.pos += len(s) if self.comptype != "tar": s = self.cmp.compress(s) self.__write(s) def __write(self, s): """Write string s to the stream if a whole new block is ready to be written. """ self.buf += s while len(self.buf) > self.bufsize: self.fileobj.write(self.buf[:self.bufsize]) self.buf = self.buf[self.bufsize:] def close(self): """Close the _Stream object. No operation should be done on it afterwards. """ if self.closed: return if self.mode == "w" and self.comptype != "tar": self.buf += self.cmp.flush() if self.mode == "w" and self.buf: self.fileobj.write(self.buf) self.buf = b"" if self.comptype == "gz": # The native zlib crc is an unsigned 32-bit integer, but # the Python wrapper implicitly casts that to a signed C # long. So, on a 32-bit box self.crc may "look negative", # while the same crc on a 64-bit box may "look positive". # To avoid irksome warnings from the `struct` module, force # it to look positive on all boxes. self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff)) self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF)) if not self._extfileobj: self.fileobj.close() self.closed = True def _init_read_gz(self): """Initialize for reading a gzip compressed fileobj. """ self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) self.dbuf = b"" # taken from gzip.GzipFile with some alterations if self.__read(2) != b"\037\213": raise ReadError("not a gzip file") if self.__read(1) != b"\010": raise CompressionError("unsupported compression method") flag = ord(self.__read(1)) self.__read(6) if flag & 4: xlen = ord(self.__read(1)) + 256 * ord(self.__read(1)) self.read(xlen) if flag & 8: while True: s = self.__read(1) if not s or s == NUL: break if flag & 16: while True: s = self.__read(1) if not s or s == NUL: break if flag & 2: self.__read(2) def tell(self): """Return the stream's file pointer position. """ return self.pos def seek(self, pos=0): """Set the stream's file pointer to pos. Negative seeking is forbidden. """ if pos - self.pos >= 0: blocks, remainder = divmod(pos - self.pos, self.bufsize) for i in range(blocks): self.read(self.bufsize) self.read(remainder) else: raise StreamError("seeking backwards is not allowed") return self.pos def read(self, size=None): """Return the next size number of bytes from the stream. If size is not defined, return all bytes of the stream up to EOF. """ if size is None: t = [] while True: buf = self._read(self.bufsize) if not buf: break t.append(buf) buf = "".join(t) else: buf = self._read(size) self.pos += len(buf) return buf def _read(self, size): """Return size bytes from the stream. """ if self.comptype == "tar": return self.__read(size) c = len(self.dbuf) while c < size: buf = self.__read(self.bufsize) if not buf: break try: buf = self.cmp.decompress(buf) except IOError: raise ReadError("invalid compressed data") self.dbuf += buf c += len(buf) buf = self.dbuf[:size] self.dbuf = self.dbuf[size:] return buf def __read(self, size): """Return size bytes from stream. If internal buffer is empty, read another block from the stream. """ c = len(self.buf) while c < size: buf = self.fileobj.read(self.bufsize) if not buf: break self.buf += buf c += len(buf) buf = self.buf[:size] self.buf = self.buf[size:] return buf # class _Stream class _StreamProxy(object): """Small proxy class that enables transparent compression detection for the Stream interface (mode 'r|*'). """ def __init__(self, fileobj): self.fileobj = fileobj self.buf = self.fileobj.read(BLOCKSIZE) def read(self, size): self.read = self.fileobj.read return self.buf def getcomptype(self): if self.buf.startswith(b"\037\213\010"): return "gz" if self.buf.startswith(b"BZh91"): return "bz2" return "tar" def close(self): self.fileobj.close() # class StreamProxy class _BZ2Proxy(object): """Small proxy class that enables external file object support for "r:bz2" and "w:bz2" modes. This is actually a workaround for a limitation in bz2 module's BZ2File class which (unlike gzip.GzipFile) has no support for a file object argument. """ blocksize = 16 * 1024 def __init__(self, fileobj, mode): self.fileobj = fileobj self.mode = mode self.name = getattr(self.fileobj, "name", None) self.init() def init(self): import bz2 self.pos = 0 if self.mode == "r": self.bz2obj = bz2.BZ2Decompressor() self.fileobj.seek(0) self.buf = b"" else: self.bz2obj = bz2.BZ2Compressor() def read(self, size): x = len(self.buf) while x < size: raw = self.fileobj.read(self.blocksize) if not raw: break data = self.bz2obj.decompress(raw) self.buf += data x += len(data) buf = self.buf[:size] self.buf = self.buf[size:] self.pos += len(buf) return buf def seek(self, pos): if pos < self.pos: self.init() self.read(pos - self.pos) def tell(self): return self.pos def write(self, data): self.pos += len(data) raw = self.bz2obj.compress(data) self.fileobj.write(raw) def close(self): if self.mode == "w": raw = self.bz2obj.flush() self.fileobj.write(raw) # class _BZ2Proxy #------------------------ # Extraction file object #------------------------ class _FileInFile(object): """A thin wrapper around an existing file object that provides a part of its data as an individual file object. """ def __init__(self, fileobj, offset, size, blockinfo=None): self.fileobj = fileobj self.offset = offset self.size = size self.position = 0 if blockinfo is None: blockinfo = [(0, size)] # Construct a map with data and zero blocks. self.map_index = 0 self.map = [] lastpos = 0 realpos = self.offset for offset, size in blockinfo: if offset > lastpos: self.map.append((False, lastpos, offset, None)) self.map.append((True, offset, offset + size, realpos)) realpos += size lastpos = offset + size if lastpos < self.size: self.map.append((False, lastpos, self.size, None)) def seekable(self): if not hasattr(self.fileobj, "seekable"): # XXX gzip.GzipFile and bz2.BZ2File return True return self.fileobj.seekable() def tell(self): """Return the current file position. """ return self.position def seek(self, position): """Seek to a position in the file. """ self.position = position def read(self, size=None): """Read data from the file. """ if size is None: size = self.size - self.position else: size = min(size, self.size - self.position) buf = b"" while size > 0: while True: data, start, stop, offset = self.map[self.map_index] if start <= self.position < stop: break else: self.map_index += 1 if self.map_index == len(self.map): self.map_index = 0 length = min(size, stop - self.position) if data: self.fileobj.seek(offset + (self.position - start)) buf += self.fileobj.read(length) else: buf += NUL * length size -= length self.position += length return buf #class _FileInFile class ExFileObject(object): """File-like object for reading an archive member. Is returned by TarFile.extractfile(). """ blocksize = 1024 def __init__(self, tarfile, tarinfo): self.fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, tarinfo.size, tarinfo.sparse) self.name = tarinfo.name self.mode = "r" self.closed = False self.size = tarinfo.size self.position = 0 self.buffer = b"" def readable(self): return True def writable(self): return False def seekable(self): return self.fileobj.seekable() def read(self, size=None): """Read at most size bytes from the file. If size is not present or None, read all data until EOF is reached. """ if self.closed: raise ValueError("I/O operation on closed file") buf = b"" if self.buffer: if size is None: buf = self.buffer self.buffer = b"" else: buf = self.buffer[:size] self.buffer = self.buffer[size:] if size is None: buf += self.fileobj.read() else: buf += self.fileobj.read(size - len(buf)) self.position += len(buf) return buf # XXX TextIOWrapper uses the read1() method. read1 = read def readline(self, size=-1): """Read one entire line from the file. If size is present and non-negative, return a string with at most that size, which may be an incomplete line. """ if self.closed: raise ValueError("I/O operation on closed file") pos = self.buffer.find(b"\n") + 1 if pos == 0: # no newline found. while True: buf = self.fileobj.read(self.blocksize) self.buffer += buf if not buf or b"\n" in buf: pos = self.buffer.find(b"\n") + 1 if pos == 0: # no newline found. pos = len(self.buffer) break if size != -1: pos = min(size, pos) buf = self.buffer[:pos] self.buffer = self.buffer[pos:] self.position += len(buf) return buf def readlines(self): """Return a list with all remaining lines. """ result = [] while True: line = self.readline() if not line: break result.append(line) return result def tell(self): """Return the current file position. """ if self.closed: raise ValueError("I/O operation on closed file") return self.position def seek(self, pos, whence=os.SEEK_SET): """Seek to a position in the file. """ if self.closed: raise ValueError("I/O operation on closed file") if whence == os.SEEK_SET: self.position = min(max(pos, 0), self.size) elif whence == os.SEEK_CUR: if pos < 0: self.position = max(self.position + pos, 0) else: self.position = min(self.position + pos, self.size) elif whence == os.SEEK_END: self.position = max(min(self.size + pos, self.size), 0) else: raise ValueError("Invalid argument") self.buffer = b"" self.fileobj.seek(self.position) def close(self): """Close the file object. """ self.closed = True def __iter__(self): """Get an iterator over the file's lines. """ while True: line = self.readline() if not line: break yield line #class ExFileObject #------------------ # Exported Classes #------------------ class TarInfo(object): """Informational class which holds the details about an archive member given by a tar header block. TarInfo objects are returned by TarFile.getmember(), TarFile.getmembers() and TarFile.gettarinfo() and are usually created internally. """ __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", "chksum", "type", "linkname", "uname", "gname", "devmajor", "devminor", "offset", "offset_data", "pax_headers", "sparse", "tarfile", "_sparse_structs", "_link_target") def __init__(self, name=""): """Construct a TarInfo object. name is the optional name of the member. """ self.name = name # member name self.mode = 0o644 # file permissions self.uid = 0 # user id self.gid = 0 # group id self.size = 0 # file size self.mtime = 0 # modification time self.chksum = 0 # header checksum self.type = REGTYPE # member type self.linkname = "" # link name self.uname = "" # user name self.gname = "" # group name self.devmajor = 0 # device major number self.devminor = 0 # device minor number self.offset = 0 # the tar header starts here self.offset_data = 0 # the file's data starts here self.sparse = None # sparse member information self.pax_headers = {} # pax header information # In pax headers the "name" and "linkname" field are called # "path" and "linkpath". def _getpath(self): return self.name def _setpath(self, name): self.name = name path = property(_getpath, _setpath) def _getlinkpath(self): return self.linkname def _setlinkpath(self, linkname): self.linkname = linkname linkpath = property(_getlinkpath, _setlinkpath) def __repr__(self): return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) def get_info(self): """Return the TarInfo's attributes as a dictionary. """ info = { "name": self.name, "mode": self.mode & 0o7777, "uid": self.uid, "gid": self.gid, "size": self.size, "mtime": self.mtime, "chksum": self.chksum, "type": self.type, "linkname": self.linkname, "uname": self.uname, "gname": self.gname, "devmajor": self.devmajor, "devminor": self.devminor } if info["type"] == DIRTYPE and not info["name"].endswith("/"): info["name"] += "/" return info def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): """Return a tar header as a string of 512 byte blocks. """ info = self.get_info() if format == USTAR_FORMAT: return self.create_ustar_header(info, encoding, errors) elif format == GNU_FORMAT: return self.create_gnu_header(info, encoding, errors) elif format == PAX_FORMAT: return self.create_pax_header(info, encoding) else: raise ValueError("invalid format") def create_ustar_header(self, info, encoding, errors): """Return the object as a ustar header block. """ info["magic"] = POSIX_MAGIC if len(info["linkname"]) > LENGTH_LINK: raise ValueError("linkname is too long") if len(info["name"]) > LENGTH_NAME: info["prefix"], info["name"] = self._posix_split_name(info["name"]) return self._create_header(info, USTAR_FORMAT, encoding, errors) def create_gnu_header(self, info, encoding, errors): """Return the object as a GNU header block sequence. """ info["magic"] = GNU_MAGIC buf = b"" if len(info["linkname"]) > LENGTH_LINK: buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) if len(info["name"]) > LENGTH_NAME: buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) return buf + self._create_header(info, GNU_FORMAT, encoding, errors) def create_pax_header(self, info, encoding): """Return the object as a ustar header block. If it cannot be represented this way, prepend a pax extended header sequence with supplement information. """ info["magic"] = POSIX_MAGIC pax_headers = self.pax_headers.copy() # Test string fields for values that exceed the field length or cannot # be represented in ASCII encoding. for name, hname, length in ( ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), ("uname", "uname", 32), ("gname", "gname", 32)): if hname in pax_headers: # The pax header has priority. continue # Try to encode the string as ASCII. try: info[name].encode("ascii", "strict") except UnicodeEncodeError: pax_headers[hname] = info[name] continue if len(info[name]) > length: pax_headers[hname] = info[name] # Test number fields for values that exceed the field limit or values # that like to be stored as float. for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): if name in pax_headers: # The pax header has priority. Avoid overflow. info[name] = 0 continue val = info[name] if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): pax_headers[name] = str(val) info[name] = 0 # Create a pax extended header if necessary. if pax_headers: buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) else: buf = b"" return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") @classmethod def create_pax_global_header(cls, pax_headers): """Return the object as a pax global header block sequence. """ return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") def _posix_split_name(self, name): """Split a name longer than 100 chars into a prefix and a name part. """ prefix = name[:LENGTH_PREFIX + 1] while prefix and prefix[-1] != "/": prefix = prefix[:-1] name = name[len(prefix):] prefix = prefix[:-1] if not prefix or len(name) > LENGTH_NAME: raise ValueError("name is too long") return prefix, name @staticmethod def _create_header(info, format, encoding, errors): """Return a header block. info is a dictionary with file information, format must be one of the *_FORMAT constants. """ parts = [ stn(info.get("name", ""), 100, encoding, errors), itn(info.get("mode", 0) & 0o7777, 8, format), itn(info.get("uid", 0), 8, format), itn(info.get("gid", 0), 8, format), itn(info.get("size", 0), 12, format), itn(info.get("mtime", 0), 12, format), b" ", # checksum field info.get("type", REGTYPE), stn(info.get("linkname", ""), 100, encoding, errors), info.get("magic", POSIX_MAGIC), stn(info.get("uname", ""), 32, encoding, errors), stn(info.get("gname", ""), 32, encoding, errors), itn(info.get("devmajor", 0), 8, format), itn(info.get("devminor", 0), 8, format), stn(info.get("prefix", ""), 155, encoding, errors) ] buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) chksum = calc_chksums(buf[-BLOCKSIZE:])[0] buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] return buf @staticmethod def _create_payload(payload): """Return the string payload filled with zero bytes up to the next 512 byte border. """ blocks, remainder = divmod(len(payload), BLOCKSIZE) if remainder > 0: payload += (BLOCKSIZE - remainder) * NUL return payload @classmethod def _create_gnu_long_header(cls, name, type, encoding, errors): """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name. """ name = name.encode(encoding, errors) + NUL info = {} info["name"] = "././@LongLink" info["type"] = type info["size"] = len(name) info["magic"] = GNU_MAGIC # create extended header + name blocks. return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ cls._create_payload(name) @classmethod def _create_pax_generic_header(cls, pax_headers, type, encoding): """Return a POSIX.1-2008 extended or global header sequence that contains a list of keyword, value pairs. The values must be strings. """ # Check if one of the fields contains surrogate characters and thereby # forces hdrcharset=BINARY, see _proc_pax() for more information. binary = False for keyword, value in pax_headers.items(): try: value.encode("utf8", "strict") except UnicodeEncodeError: binary = True break records = b"" if binary: # Put the hdrcharset field at the beginning of the header. records += b"21 hdrcharset=BINARY\n" for keyword, value in pax_headers.items(): keyword = keyword.encode("utf8") if binary: # Try to restore the original byte representation of `value'. # Needless to say, that the encoding must match the string. value = value.encode(encoding, "surrogateescape") else: value = value.encode("utf8") l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' n = p = 0 while True: n = l + len(str(p)) if n == p: break p = n records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" # We use a hardcoded "././@PaxHeader" name like star does # instead of the one that POSIX recommends. info = {} info["name"] = "././@PaxHeader" info["type"] = type info["size"] = len(records) info["magic"] = POSIX_MAGIC # Create pax header + record blocks. return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ cls._create_payload(records) @classmethod def frombuf(cls, buf, encoding, errors): """Construct a TarInfo object from a 512 byte bytes object. """ if len(buf) == 0: raise EmptyHeaderError("empty header") if len(buf) != BLOCKSIZE: raise TruncatedHeaderError("truncated header") if buf.count(NUL) == BLOCKSIZE: raise EOFHeaderError("end of file header") chksum = nti(buf[148:156]) if chksum not in calc_chksums(buf): raise InvalidHeaderError("bad checksum") obj = cls() obj.name = nts(buf[0:100], encoding, errors) obj.mode = nti(buf[100:108]) obj.uid = nti(buf[108:116]) obj.gid = nti(buf[116:124]) obj.size = nti(buf[124:136]) obj.mtime = nti(buf[136:148]) obj.chksum = chksum obj.type = buf[156:157] obj.linkname = nts(buf[157:257], encoding, errors) obj.uname = nts(buf[265:297], encoding, errors) obj.gname = nts(buf[297:329], encoding, errors) obj.devmajor = nti(buf[329:337]) obj.devminor = nti(buf[337:345]) prefix = nts(buf[345:500], encoding, errors) # Old V7 tar format represents a directory as a regular # file with a trailing slash. if obj.type == AREGTYPE and obj.name.endswith("/"): obj.type = DIRTYPE # The old GNU sparse format occupies some of the unused # space in the buffer for up to 4 sparse structures. # Save the them for later processing in _proc_sparse(). if obj.type == GNUTYPE_SPARSE: pos = 386 structs = [] for i in range(4): try: offset = nti(buf[pos:pos + 12]) numbytes = nti(buf[pos + 12:pos + 24]) except ValueError: break structs.append((offset, numbytes)) pos += 24 isextended = bool(buf[482]) origsize = nti(buf[483:495]) obj._sparse_structs = (structs, isextended, origsize) # Remove redundant slashes from directories. if obj.isdir(): obj.name = obj.name.rstrip("/") # Reconstruct a ustar longname. if prefix and obj.type not in GNU_TYPES: obj.name = prefix + "/" + obj.name return obj @classmethod def fromtarfile(cls, tarfile): """Return the next TarInfo object from TarFile object tarfile. """ buf = tarfile.fileobj.read(BLOCKSIZE) obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) obj.offset = tarfile.fileobj.tell() - BLOCKSIZE return obj._proc_member(tarfile) #-------------------------------------------------------------------------- # The following are methods that are called depending on the type of a # member. The entry point is _proc_member() which can be overridden in a # subclass to add custom _proc_*() methods. A _proc_*() method MUST # implement the following # operations: # 1. Set self.offset_data to the position where the data blocks begin, # if there is data that follows. # 2. Set tarfile.offset to the position where the next member's header will # begin. # 3. Return self or another valid TarInfo object. def _proc_member(self, tarfile): """Choose the right processing method depending on the type and call it. """ if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): return self._proc_gnulong(tarfile) elif self.type == GNUTYPE_SPARSE: return self._proc_sparse(tarfile) elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): return self._proc_pax(tarfile) else: return self._proc_builtin(tarfile) def _proc_builtin(self, tarfile): """Process a builtin type or an unknown type which will be treated as a regular file. """ self.offset_data = tarfile.fileobj.tell() offset = self.offset_data if self.isreg() or self.type not in SUPPORTED_TYPES: # Skip the following data blocks. offset += self._block(self.size) tarfile.offset = offset # Patch the TarInfo object with saved global # header information. self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) return self def _proc_gnulong(self, tarfile): """Process the blocks that hold a GNU longname or longlink member. """ buf = tarfile.fileobj.read(self._block(self.size)) # Fetch the next header and process it. try: next = self.fromtarfile(tarfile) except HeaderError: raise SubsequentHeaderError("missing or bad subsequent header") # Patch the TarInfo object from the next header with # the longname information. next.offset = self.offset if self.type == GNUTYPE_LONGNAME: next.name = nts(buf, tarfile.encoding, tarfile.errors) elif self.type == GNUTYPE_LONGLINK: next.linkname = nts(buf, tarfile.encoding, tarfile.errors) return next def _proc_sparse(self, tarfile): """Process a GNU sparse header plus extra headers. """ # We already collected some sparse structures in frombuf(). structs, isextended, origsize = self._sparse_structs del self._sparse_structs # Collect sparse structures from extended header blocks. while isextended: buf = tarfile.fileobj.read(BLOCKSIZE) pos = 0 for i in range(21): try: offset = nti(buf[pos:pos + 12]) numbytes = nti(buf[pos + 12:pos + 24]) except ValueError: break if offset and numbytes: structs.append((offset, numbytes)) pos += 24 isextended = bool(buf[504]) self.sparse = structs self.offset_data = tarfile.fileobj.tell() tarfile.offset = self.offset_data + self._block(self.size) self.size = origsize return self def _proc_pax(self, tarfile): """Process an extended or global header as described in POSIX.1-2008. """ # Read the header information. buf = tarfile.fileobj.read(self._block(self.size)) # A pax header stores supplemental information for either # the following file (extended) or all following files # (global). if self.type == XGLTYPE: pax_headers = tarfile.pax_headers else: pax_headers = tarfile.pax_headers.copy() # Check if the pax header contains a hdrcharset field. This tells us # the encoding of the path, linkpath, uname and gname fields. Normally, # these fields are UTF-8 encoded but since POSIX.1-2008 tar # implementations are allowed to store them as raw binary strings if # the translation to UTF-8 fails. match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) if match is not None: pax_headers["hdrcharset"] = match.group(1).decode("utf8") # For the time being, we don't care about anything other than "BINARY". # The only other value that is currently allowed by the standard is # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. hdrcharset = pax_headers.get("hdrcharset") if hdrcharset == "BINARY": encoding = tarfile.encoding else: encoding = "utf8" # Parse pax header information. A record looks like that: # "%d %s=%s\n" % (length, keyword, value). length is the size # of the complete record including the length field itself and # the newline. keyword and value are both UTF-8 encoded strings. regex = re.compile(br"(\d+) ([^=]+)=") pos = 0 while True: match = regex.match(buf, pos) if not match: break length, keyword = match.groups() length = int(length) value = buf[match.end(2) + 1:match.start(1) + length - 1] # Normally, we could just use "utf8" as the encoding and "strict" # as the error handler, but we better not take the risk. For # example, GNU tar <= 1.23 is known to store filenames it cannot # translate to UTF-8 as raw strings (unfortunately without a # hdrcharset=BINARY header). # We first try the strict standard encoding, and if that fails we # fall back on the user's encoding and error handler. keyword = self._decode_pax_field(keyword, "utf8", "utf8", tarfile.errors) if keyword in PAX_NAME_FIELDS: value = self._decode_pax_field(value, encoding, tarfile.encoding, tarfile.errors) else: value = self._decode_pax_field(value, "utf8", "utf8", tarfile.errors) pax_headers[keyword] = value pos += length # Fetch the next header. try: next = self.fromtarfile(tarfile) except HeaderError: raise SubsequentHeaderError("missing or bad subsequent header") # Process GNU sparse information. if "GNU.sparse.map" in pax_headers: # GNU extended sparse format version 0.1. self._proc_gnusparse_01(next, pax_headers) elif "GNU.sparse.size" in pax_headers: # GNU extended sparse format version 0.0. self._proc_gnusparse_00(next, pax_headers, buf) elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": # GNU extended sparse format version 1.0. self._proc_gnusparse_10(next, pax_headers, tarfile) if self.type in (XHDTYPE, SOLARIS_XHDTYPE): # Patch the TarInfo object with the extended header info. next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) next.offset = self.offset if "size" in pax_headers: # If the extended header replaces the size field, # we need to recalculate the offset where the next # header starts. offset = next.offset_data if next.isreg() or next.type not in SUPPORTED_TYPES: offset += next._block(next.size) tarfile.offset = offset return next def _proc_gnusparse_00(self, next, pax_headers, buf): """Process a GNU tar extended sparse header, version 0.0. """ offsets = [] for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): offsets.append(int(match.group(1))) numbytes = [] for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): numbytes.append(int(match.group(1))) next.sparse = list(zip(offsets, numbytes)) def _proc_gnusparse_01(self, next, pax_headers): """Process a GNU tar extended sparse header, version 0.1. """ sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] next.sparse = list(zip(sparse[::2], sparse[1::2])) def _proc_gnusparse_10(self, next, pax_headers, tarfile): """Process a GNU tar extended sparse header, version 1.0. """ fields = None sparse = [] buf = tarfile.fileobj.read(BLOCKSIZE) fields, buf = buf.split(b"\n", 1) fields = int(fields) while len(sparse) < fields * 2: if b"\n" not in buf: buf += tarfile.fileobj.read(BLOCKSIZE) number, buf = buf.split(b"\n", 1) sparse.append(int(number)) next.offset_data = tarfile.fileobj.tell() next.sparse = list(zip(sparse[::2], sparse[1::2])) def _apply_pax_info(self, pax_headers, encoding, errors): """Replace fields with supplemental information from a previous pax extended or global header. """ for keyword, value in pax_headers.items(): if keyword == "GNU.sparse.name": setattr(self, "path", value) elif keyword == "GNU.sparse.size": setattr(self, "size", int(value)) elif keyword == "GNU.sparse.realsize": setattr(self, "size", int(value)) elif keyword in PAX_FIELDS: if keyword in PAX_NUMBER_FIELDS: try: value = PAX_NUMBER_FIELDS[keyword](value) except ValueError: value = 0 if keyword == "path": value = value.rstrip("/") setattr(self, keyword, value) self.pax_headers = pax_headers.copy() def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): """Decode a single field from a pax record. """ try: return value.decode(encoding, "strict") except UnicodeDecodeError: return value.decode(fallback_encoding, fallback_errors) def _block(self, count): """Round up a byte count by BLOCKSIZE and return it, e.g. _block(834) => 1024. """ blocks, remainder = divmod(count, BLOCKSIZE) if remainder: blocks += 1 return blocks * BLOCKSIZE def isreg(self): return self.type in REGULAR_TYPES def isfile(self): return self.isreg() def isdir(self): return self.type == DIRTYPE def issym(self): return self.type == SYMTYPE def islnk(self): return self.type == LNKTYPE def ischr(self): return self.type == CHRTYPE def isblk(self): return self.type == BLKTYPE def isfifo(self): return self.type == FIFOTYPE def issparse(self): return self.sparse is not None def isdev(self): return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) # class TarInfo class TarFile(object): """The TarFile Class provides an interface to tar archives. """ debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) dereference = False # If true, add content of linked file to the # tar file, else the link. ignore_zeros = False # If true, skips empty or invalid blocks and # continues processing. errorlevel = 1 # If 0, fatal errors only appear in debug # messages (if debug >= 0). If > 0, errors # are passed to the caller as exceptions. format = DEFAULT_FORMAT # The format to use when creating an archive. encoding = ENCODING # Encoding for 8-bit character strings. errors = None # Error handler for unicode conversion. tarinfo = TarInfo # The default TarInfo class to use. fileobject = ExFileObject # The default ExFileObject class to use. def __init__(self, name=None, mode="r", fileobj=None, format=None, tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to read from an existing archive, 'a' to append data to an existing file or 'w' to create a new file overwriting an existing one. `mode' defaults to 'r'. If `fileobj' is given, it is used for reading or writing data. If it can be determined, `mode' is overridden by `fileobj's mode. `fileobj' is not closed, when TarFile is closed. """ if len(mode) > 1 or mode not in "raw": raise ValueError("mode must be 'r', 'a' or 'w'") self.mode = mode self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] if not fileobj: if self.mode == "a" and not os.path.exists(name): # Create nonexistent files in append mode. self.mode = "w" self._mode = "wb" fileobj = bltn_open(name, self._mode) self._extfileobj = False else: if name is None and hasattr(fileobj, "name"): name = fileobj.name if hasattr(fileobj, "mode"): self._mode = fileobj.mode self._extfileobj = True self.name = os.path.abspath(name) if name else None self.fileobj = fileobj # Init attributes. if format is not None: self.format = format if tarinfo is not None: self.tarinfo = tarinfo if dereference is not None: self.dereference = dereference if ignore_zeros is not None: self.ignore_zeros = ignore_zeros if encoding is not None: self.encoding = encoding self.errors = errors if pax_headers is not None and self.format == PAX_FORMAT: self.pax_headers = pax_headers else: self.pax_headers = {} if debug is not None: self.debug = debug if errorlevel is not None: self.errorlevel = errorlevel # Init datastructures. self.closed = False self.members = [] # list of members as TarInfo objects self._loaded = False # flag if all members have been read self.offset = self.fileobj.tell() # current position in the archive file self.inodes = {} # dictionary caching the inodes of # archive members already added try: if self.mode == "r": self.firstmember = None self.firstmember = self.next() if self.mode == "a": # Move to the end of the archive, # before the first empty block. while True: self.fileobj.seek(self.offset) try: tarinfo = self.tarinfo.fromtarfile(self) self.members.append(tarinfo) except EOFHeaderError: self.fileobj.seek(self.offset) break except HeaderError as e: raise ReadError(str(e)) if self.mode in "aw": self._loaded = True if self.pax_headers: buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) self.fileobj.write(buf) self.offset += len(buf) except: if not self._extfileobj: self.fileobj.close() self.closed = True raise #-------------------------------------------------------------------------- # Below are the classmethods which act as alternate constructors to the # TarFile class. The open() method is the only one that is needed for # public use; it is the "super"-constructor and is able to select an # adequate "sub"-constructor for a particular compression using the mapping # from OPEN_METH. # # This concept allows one to subclass TarFile without losing the comfort of # the super-constructor. A sub-constructor is registered and made available # by adding it to the mapping in OPEN_METH. @classmethod def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): """Open a tar archive for reading, writing or appending. Return an appropriate TarFile class. mode: 'r' or 'r:*' open for reading with transparent compression 'r:' open for reading exclusively uncompressed 'r:gz' open for reading with gzip compression 'r:bz2' open for reading with bzip2 compression 'a' or 'a:' open for appending, creating the file if necessary 'w' or 'w:' open for writing without compression 'w:gz' open for writing with gzip compression 'w:bz2' open for writing with bzip2 compression 'r|*' open a stream of tar blocks with transparent compression 'r|' open an uncompressed stream of tar blocks for reading 'r|gz' open a gzip compressed stream of tar blocks 'r|bz2' open a bzip2 compressed stream of tar blocks 'w|' open an uncompressed stream for writing 'w|gz' open a gzip compressed stream for writing 'w|bz2' open a bzip2 compressed stream for writing """ if not name and not fileobj: raise ValueError("nothing to open") if mode in ("r", "r:*"): # Find out which *open() is appropriate for opening the file. for comptype in cls.OPEN_METH: func = getattr(cls, cls.OPEN_METH[comptype]) if fileobj is not None: saved_pos = fileobj.tell() try: return func(name, "r", fileobj, **kwargs) except (ReadError, CompressionError) as e: if fileobj is not None: fileobj.seek(saved_pos) continue raise ReadError("file could not be opened successfully") elif ":" in mode: filemode, comptype = mode.split(":", 1) filemode = filemode or "r" comptype = comptype or "tar" # Select the *open() function according to # given compression. if comptype in cls.OPEN_METH: func = getattr(cls, cls.OPEN_METH[comptype]) else: raise CompressionError("unknown compression type %r" % comptype) return func(name, filemode, fileobj, **kwargs) elif "|" in mode: filemode, comptype = mode.split("|", 1) filemode = filemode or "r" comptype = comptype or "tar" if filemode not in "rw": raise ValueError("mode must be 'r' or 'w'") stream = _Stream(name, filemode, comptype, fileobj, bufsize) try: t = cls(name, filemode, stream, **kwargs) except: stream.close() raise t._extfileobj = False return t elif mode in "aw": return cls.taropen(name, mode, fileobj, **kwargs) raise ValueError("undiscernible mode") @classmethod def taropen(cls, name, mode="r", fileobj=None, **kwargs): """Open uncompressed tar archive name for reading or writing. """ if len(mode) > 1 or mode not in "raw": raise ValueError("mode must be 'r', 'a' or 'w'") return cls(name, mode, fileobj, **kwargs) @classmethod def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): """Open gzip compressed tar archive name for reading or writing. Appending is not allowed. """ if len(mode) > 1 or mode not in "rw": raise ValueError("mode must be 'r' or 'w'") try: import gzip gzip.GzipFile except (ImportError, AttributeError): raise CompressionError("gzip module is not available") extfileobj = fileobj is not None try: fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) t = cls.taropen(name, mode, fileobj, **kwargs) except IOError: if not extfileobj and fileobj is not None: fileobj.close() if fileobj is None: raise raise ReadError("not a gzip file") except: if not extfileobj and fileobj is not None: fileobj.close() raise t._extfileobj = extfileobj return t @classmethod def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): """Open bzip2 compressed tar archive name for reading or writing. Appending is not allowed. """ if len(mode) > 1 or mode not in "rw": raise ValueError("mode must be 'r' or 'w'.") try: import bz2 except ImportError: raise CompressionError("bz2 module is not available") if fileobj is not None: fileobj = _BZ2Proxy(fileobj, mode) else: fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) try: t = cls.taropen(name, mode, fileobj, **kwargs) except (IOError, EOFError): fileobj.close() raise ReadError("not a bzip2 file") t._extfileobj = False return t # All *open() methods are registered here. OPEN_METH = { "tar": "taropen", # uncompressed tar "gz": "gzopen", # gzip compressed tar "bz2": "bz2open" # bzip2 compressed tar } #-------------------------------------------------------------------------- # The public methods which TarFile provides: def close(self): """Close the TarFile. In write-mode, two finishing zero blocks are appended to the archive. """ if self.closed: return if self.mode in "aw": self.fileobj.write(NUL * (BLOCKSIZE * 2)) self.offset += (BLOCKSIZE * 2) # fill up the end with zero-blocks # (like option -b20 for tar does) blocks, remainder = divmod(self.offset, RECORDSIZE) if remainder > 0: self.fileobj.write(NUL * (RECORDSIZE - remainder)) if not self._extfileobj: self.fileobj.close() self.closed = True def getmember(self, name): """Return a TarInfo object for member `name'. If `name' can not be found in the archive, KeyError is raised. If a member occurs more than once in the archive, its last occurrence is assumed to be the most up-to-date version. """ tarinfo = self._getmember(name) if tarinfo is None: raise KeyError("filename %r not found" % name) return tarinfo def getmembers(self): """Return the members of the archive as a list of TarInfo objects. The list has the same order as the members in the archive. """ self._check() if not self._loaded: # if we want to obtain a list of self._load() # all members, we first have to # scan the whole archive. return self.members def getnames(self): """Return the members of the archive as a list of their names. It has the same order as the list returned by getmembers(). """ return [tarinfo.name for tarinfo in self.getmembers()] def gettarinfo(self, name=None, arcname=None, fileobj=None): """Create a TarInfo object for either the file `name' or the file object `fileobj' (using os.fstat on its file descriptor). You can modify some of the TarInfo's attributes before you add it using addfile(). If given, `arcname' specifies an alternative name for the file in the archive. """ self._check("aw") # When fileobj is given, replace name by # fileobj's real name. if fileobj is not None: name = fileobj.name # Building the name of the member in the archive. # Backward slashes are converted to forward slashes, # Absolute paths are turned to relative paths. if arcname is None: arcname = name drv, arcname = os.path.splitdrive(arcname) arcname = arcname.replace(os.sep, "/") arcname = arcname.lstrip("/") # Now, fill the TarInfo object with # information specific for the file. tarinfo = self.tarinfo() tarinfo.tarfile = self # Use os.stat or os.lstat, depending on platform # and if symlinks shall be resolved. if fileobj is None: if hasattr(os, "lstat") and not self.dereference: statres = os.lstat(name) else: statres = os.stat(name) else: statres = os.fstat(fileobj.fileno()) linkname = "" stmd = statres.st_mode if stat.S_ISREG(stmd): inode = (statres.st_ino, statres.st_dev) if not self.dereference and statres.st_nlink > 1 and \ inode in self.inodes and arcname != self.inodes[inode]: # Is it a hardlink to an already # archived file? type = LNKTYPE linkname = self.inodes[inode] else: # The inode is added only if its valid. # For win32 it is always 0. type = REGTYPE if inode[0]: self.inodes[inode] = arcname elif stat.S_ISDIR(stmd): type = DIRTYPE elif stat.S_ISFIFO(stmd): type = FIFOTYPE elif stat.S_ISLNK(stmd): type = SYMTYPE linkname = os.readlink(name) elif stat.S_ISCHR(stmd): type = CHRTYPE elif stat.S_ISBLK(stmd): type = BLKTYPE else: return None # Fill the TarInfo object with all # information we can get. tarinfo.name = arcname tarinfo.mode = stmd tarinfo.uid = statres.st_uid tarinfo.gid = statres.st_gid if type == REGTYPE: tarinfo.size = statres.st_size else: tarinfo.size = 0 tarinfo.mtime = statres.st_mtime tarinfo.type = type tarinfo.linkname = linkname if pwd: try: tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] except KeyError: pass if grp: try: tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] except KeyError: pass if type in (CHRTYPE, BLKTYPE): if hasattr(os, "major") and hasattr(os, "minor"): tarinfo.devmajor = os.major(statres.st_rdev) tarinfo.devminor = os.minor(statres.st_rdev) return tarinfo def list(self, verbose=True): """Print a table of contents to sys.stdout. If `verbose' is False, only the names of the members are printed. If it is True, an `ls -l'-like output is produced. """ self._check() for tarinfo in self: if verbose: print(filemode(tarinfo.mode), end=' ') print("%s/%s" % (tarinfo.uname or tarinfo.uid, tarinfo.gname or tarinfo.gid), end=' ') if tarinfo.ischr() or tarinfo.isblk(): print("%10s" % ("%d,%d" \ % (tarinfo.devmajor, tarinfo.devminor)), end=' ') else: print("%10d" % tarinfo.size, end=' ') print("%d-%02d-%02d %02d:%02d:%02d" \ % time.localtime(tarinfo.mtime)[:6], end=' ') print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') if verbose: if tarinfo.issym(): print("->", tarinfo.linkname, end=' ') if tarinfo.islnk(): print("link to", tarinfo.linkname, end=' ') print() def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): """Add the file `name' to the archive. `name' may be any type of file (directory, fifo, symbolic link, etc.). If given, `arcname' specifies an alternative name for the file in the archive. Directories are added recursively by default. This can be avoided by setting `recursive' to False. `exclude' is a function that should return True for each filename to be excluded. `filter' is a function that expects a TarInfo object argument and returns the changed TarInfo object, if it returns None the TarInfo object will be excluded from the archive. """ self._check("aw") if arcname is None: arcname = name # Exclude pathnames. if exclude is not None: import warnings warnings.warn("use the filter argument instead", DeprecationWarning, 2) if exclude(name): self._dbg(2, "tarfile: Excluded %r" % name) return # Skip if somebody tries to archive the archive... if self.name is not None and os.path.abspath(name) == self.name: self._dbg(2, "tarfile: Skipped %r" % name) return self._dbg(1, name) # Create a TarInfo object from the file. tarinfo = self.gettarinfo(name, arcname) if tarinfo is None: self._dbg(1, "tarfile: Unsupported type %r" % name) return # Change or exclude the TarInfo object. if filter is not None: tarinfo = filter(tarinfo) if tarinfo is None: self._dbg(2, "tarfile: Excluded %r" % name) return # Append the tar header and data to the archive. if tarinfo.isreg(): f = bltn_open(name, "rb") self.addfile(tarinfo, f) f.close() elif tarinfo.isdir(): self.addfile(tarinfo) if recursive: for f in os.listdir(name): self.add(os.path.join(name, f), os.path.join(arcname, f), recursive, exclude, filter=filter) else: self.addfile(tarinfo) def addfile(self, tarinfo, fileobj=None): """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is given, tarinfo.size bytes are read from it and added to the archive. You can create TarInfo objects using gettarinfo(). On Windows platforms, `fileobj' should always be opened with mode 'rb' to avoid irritation about the file size. """ self._check("aw") tarinfo = copy.copy(tarinfo) buf = tarinfo.tobuf(self.format, self.encoding, self.errors) self.fileobj.write(buf) self.offset += len(buf) # If there's data to follow, append it. if fileobj is not None: copyfileobj(fileobj, self.fileobj, tarinfo.size) blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) if remainder > 0: self.fileobj.write(NUL * (BLOCKSIZE - remainder)) blocks += 1 self.offset += blocks * BLOCKSIZE self.members.append(tarinfo) def extractall(self, path=".", members=None): """Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers(). """ directories = [] if members is None: members = self for tarinfo in members: if tarinfo.isdir(): # Extract directories with a safe mode. directories.append(tarinfo) tarinfo = copy.copy(tarinfo) tarinfo.mode = 0o700 # Do not set_attrs directories, as we will do that further down self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) # Reverse sort directories. directories.sort(key=lambda a: a.name) directories.reverse() # Set correct owner, mtime and filemode on directories. for tarinfo in directories: dirpath = os.path.join(path, tarinfo.name) try: self.chown(tarinfo, dirpath) self.utime(tarinfo, dirpath) self.chmod(tarinfo, dirpath) except ExtractError as e: if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def extract(self, member, path="", set_attrs=True): """Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a TarInfo object. You can specify a different directory using `path'. File attributes (owner, mtime, mode) are set unless `set_attrs' is False. """ self._check("r") if isinstance(member, str): tarinfo = self.getmember(member) else: tarinfo = member # Prepare the link target for makelink(). if tarinfo.islnk(): tarinfo._link_target = os.path.join(path, tarinfo.linkname) try: self._extract_member(tarinfo, os.path.join(path, tarinfo.name), set_attrs=set_attrs) except EnvironmentError as e: if self.errorlevel > 0: raise else: if e.filename is None: self._dbg(1, "tarfile: %s" % e.strerror) else: self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) except ExtractError as e: if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def extractfile(self, member): """Extract a member from the archive as a file object. `member' may be a filename or a TarInfo object. If `member' is a regular file, a file-like object is returned. If `member' is a link, a file-like object is constructed from the link's target. If `member' is none of the above, None is returned. The file-like object is read-only and provides the following methods: read(), readline(), readlines(), seek() and tell() """ self._check("r") if isinstance(member, str): tarinfo = self.getmember(member) else: tarinfo = member if tarinfo.isreg(): return self.fileobject(self, tarinfo) elif tarinfo.type not in SUPPORTED_TYPES: # If a member's type is unknown, it is treated as a # regular file. return self.fileobject(self, tarinfo) elif tarinfo.islnk() or tarinfo.issym(): if isinstance(self.fileobj, _Stream): # A small but ugly workaround for the case that someone tries # to extract a (sym)link as a file-object from a non-seekable # stream of tar blocks. raise StreamError("cannot extract (sym)link as file object") else: # A (sym)link's file object is its target's file object. return self.extractfile(self._find_link_target(tarinfo)) else: # If there's no data associated with the member (directory, chrdev, # blkdev, etc.), return None instead of a file object. return None def _extract_member(self, tarinfo, targetpath, set_attrs=True): """Extract the TarInfo object tarinfo to a physical file called targetpath. """ # Fetch the TarInfo object for the given name # and build the destination pathname, replacing # forward slashes to platform specific separators. targetpath = targetpath.rstrip("/") targetpath = targetpath.replace("/", os.sep) # Create all upper directories. upperdirs = os.path.dirname(targetpath) if upperdirs and not os.path.exists(upperdirs): # Create directories that are not part of the archive with # default permissions. os.makedirs(upperdirs) if tarinfo.islnk() or tarinfo.issym(): self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) else: self._dbg(1, tarinfo.name) if tarinfo.isreg(): self.makefile(tarinfo, targetpath) elif tarinfo.isdir(): self.makedir(tarinfo, targetpath) elif tarinfo.isfifo(): self.makefifo(tarinfo, targetpath) elif tarinfo.ischr() or tarinfo.isblk(): self.makedev(tarinfo, targetpath) elif tarinfo.islnk() or tarinfo.issym(): self.makelink(tarinfo, targetpath) elif tarinfo.type not in SUPPORTED_TYPES: self.makeunknown(tarinfo, targetpath) else: self.makefile(tarinfo, targetpath) if set_attrs: self.chown(tarinfo, targetpath) if not tarinfo.issym(): self.chmod(tarinfo, targetpath) self.utime(tarinfo, targetpath) #-------------------------------------------------------------------------- # Below are the different file methods. They are called via # _extract_member() when extract() is called. They can be replaced in a # subclass to implement other functionality. def makedir(self, tarinfo, targetpath): """Make a directory called targetpath. """ try: # Use a safe mode for the directory, the real mode is set # later in _extract_member(). os.mkdir(targetpath, 0o700) except EnvironmentError as e: if e.errno != errno.EEXIST: raise def makefile(self, tarinfo, targetpath): """Make a file called targetpath. """ source = self.fileobj source.seek(tarinfo.offset_data) target = bltn_open(targetpath, "wb") if tarinfo.sparse is not None: for offset, size in tarinfo.sparse: target.seek(offset) copyfileobj(source, target, size) else: copyfileobj(source, target, tarinfo.size) target.seek(tarinfo.size) target.truncate() target.close() def makeunknown(self, tarinfo, targetpath): """Make a file from a TarInfo object with an unknown type at targetpath. """ self.makefile(tarinfo, targetpath) self._dbg(1, "tarfile: Unknown file type %r, " \ "extracted as regular file." % tarinfo.type) def makefifo(self, tarinfo, targetpath): """Make a fifo called targetpath. """ if hasattr(os, "mkfifo"): os.mkfifo(targetpath) else: raise ExtractError("fifo not supported by system") def makedev(self, tarinfo, targetpath): """Make a character or block device called targetpath. """ if not hasattr(os, "mknod") or not hasattr(os, "makedev"): raise ExtractError("special devices not supported by system") mode = tarinfo.mode if tarinfo.isblk(): mode |= stat.S_IFBLK else: mode |= stat.S_IFCHR os.mknod(targetpath, mode, os.makedev(tarinfo.devmajor, tarinfo.devminor)) def makelink(self, tarinfo, targetpath): """Make a (symbolic) link called targetpath. If it cannot be created (platform limitation), we try to make a copy of the referenced file instead of a link. """ try: # For systems that support symbolic and hard links. if tarinfo.issym(): os.symlink(tarinfo.linkname, targetpath) else: # See extract(). if os.path.exists(tarinfo._link_target): os.link(tarinfo._link_target, targetpath) else: self._extract_member(self._find_link_target(tarinfo), targetpath) except symlink_exception: if tarinfo.issym(): linkpath = os.path.join(os.path.dirname(tarinfo.name), tarinfo.linkname) else: linkpath = tarinfo.linkname else: try: self._extract_member(self._find_link_target(tarinfo), targetpath) except KeyError: raise ExtractError("unable to resolve link inside archive") def chown(self, tarinfo, targetpath): """Set owner of targetpath according to tarinfo. """ if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: # We have to be root to do so. try: g = grp.getgrnam(tarinfo.gname)[2] except KeyError: g = tarinfo.gid try: u = pwd.getpwnam(tarinfo.uname)[2] except KeyError: u = tarinfo.uid try: if tarinfo.issym() and hasattr(os, "lchown"): os.lchown(targetpath, u, g) else: if sys.platform != "os2emx": os.chown(targetpath, u, g) except EnvironmentError as e: raise ExtractError("could not change owner") def chmod(self, tarinfo, targetpath): """Set file permissions of targetpath according to tarinfo. """ if hasattr(os, 'chmod'): try: os.chmod(targetpath, tarinfo.mode) except EnvironmentError as e: raise ExtractError("could not change mode") def utime(self, tarinfo, targetpath): """Set modification time of targetpath according to tarinfo. """ if not hasattr(os, 'utime'): return try: os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) except EnvironmentError as e: raise ExtractError("could not change modification time") #-------------------------------------------------------------------------- def next(self): """Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m # Read the next block. self.fileobj.seek(self.offset) tarinfo = None while True: try: tarinfo = self.tarinfo.fromtarfile(self) except EOFHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue except InvalidHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue elif self.offset == 0: raise ReadError(str(e)) except EmptyHeaderError: if self.offset == 0: raise ReadError("empty file") except TruncatedHeaderError as e: if self.offset == 0: raise ReadError(str(e)) except SubsequentHeaderError as e: raise ReadError(str(e)) break if tarinfo is not None: self.members.append(tarinfo) else: self._loaded = True return tarinfo #-------------------------------------------------------------------------- # Little helper methods: def _getmember(self, name, tarinfo=None, normalize=False): """Find an archive member by name from bottom to top. If tarinfo is given, it is used as the starting point. """ # Ensure that all members have been loaded. members = self.getmembers() # Limit the member search list up to tarinfo. if tarinfo is not None: members = members[:members.index(tarinfo)] if normalize: name = os.path.normpath(name) for member in reversed(members): if normalize: member_name = os.path.normpath(member.name) else: member_name = member.name if name == member_name: return member def _load(self): """Read through the entire archive file and look for readable members. """ while True: tarinfo = self.next() if tarinfo is None: break self._loaded = True def _check(self, mode=None): """Check if TarFile is still open, and if the operation's mode corresponds to TarFile's mode. """ if self.closed: raise IOError("%s is closed" % self.__class__.__name__) if mode is not None and self.mode not in mode: raise IOError("bad operation for mode %r" % self.mode) def _find_link_target(self, tarinfo): """Find the target member of a symlink or hardlink member in the archive. """ if tarinfo.issym(): # Always search the entire archive. linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname limit = None else: # Search the archive before the link, because a hard link is # just a reference to an already archived file. linkname = tarinfo.linkname limit = tarinfo member = self._getmember(linkname, tarinfo=limit, normalize=True) if member is None: raise KeyError("linkname %r not found" % linkname) return member def __iter__(self): """Provide an iterator object. """ if self._loaded: return iter(self.members) else: return TarIter(self) def _dbg(self, level, msg): """Write debugging output to sys.stderr. """ if level <= self.debug: print(msg, file=sys.stderr) def __enter__(self): self._check() return self def __exit__(self, type, value, traceback): if type is None: self.close() else: # An exception occurred. We must not call close() because # it would try to write end-of-archive blocks and padding. if not self._extfileobj: self.fileobj.close() self.closed = True # class TarFile class TarIter(object): """Iterator Class. for tarinfo in TarFile(...): suite... """ def __init__(self, tarfile): """Construct a TarIter object. """ self.tarfile = tarfile self.index = 0 def __iter__(self): """Return iterator object. """ return self def __next__(self): """Return the next item using TarFile's next() method. When all members have been read, set TarFile as _loaded. """ # Fix for SF #1100429: Under rare circumstances it can # happen that getmembers() is called during iteration, # which will cause TarIter to stop prematurely. if not self.tarfile._loaded: tarinfo = self.tarfile.next() if not tarinfo: self.tarfile._loaded = True raise StopIteration else: try: tarinfo = self.tarfile.members[self.index] except IndexError: raise StopIteration self.index += 1 return tarinfo next = __next__ # for Python 2.x #-------------------- # exported functions #-------------------- def is_tarfile(name): """Return True if name points to a tar archive that we are able to handle, else return False. """ try: t = open(name) t.close() return True except TarError: return False bltn_open = open open = TarFile.open
ahaw021/SSL-MAIL-PROTOCOLS-TESTING
refs/heads/master
conversations.py
1
from constants import * import base64 def decide_protocol_handler(client, PROTOCOL): """ Function to decide what conversation based on protocol. As each protocol has a different syntax we need this function to point to the right function. params: client - a python Socket Client - can be secure on insecure (conversations don't care about transport) PROTOCOl - protocol to test - this should be a string """ if PROTOCOL=="smtp": smtp_conversation(client) elif PROTOCOL=="pop": pop_conversation(client) elif PROTOCOL=="imap": imap_conversation(client) else: print("NOT A KNOWN PROTOCOL --- BYEEE") def pop_conversation(client): """ Function to test that POP service is responding as expected POP COMMAND REFERENCE: https://blog.yimingliu.com/2009/01/23/testing-a-pop3-server-via-telnet-or-openssl/ Currently working to Authentication only - no email is sent params: client - a python Socket Client - can be secure on insecure (conversations don't care about transport) """ print("Let's Start a POP Conversation: \r\n") print("Username formatted for POP: {}".format(POP_USER + EMAIL_USERNAME + b' \r\n')) client.send(POP_USER + EMAIL_USERNAME + b' \r\n') data = client.recv(1024) print(data) print("Password formatted for POP: {}".format(POP_PASS + EMAIL_PASSWORD + b' \r\n')) client.send(POP_PASS + EMAIL_PASSWORD + b' \r\n') data = client.recv(1024) print(data) def smtp_conversation(client): """ Function to test that SMTP service is responding as expected SMTP COMMAND REFERENCE: https://www.ndchost.com/wiki/mail/test-smtp-auth-telnet Currently working to Authentication only - no email is sent params: client - a python Socket Client - can be secure on insecure (conversations don't care about transport) """ print("Let's Start an SMTP Conversation: \r\n") client.send(SMTP_AUTH) data = client.recv(1024) b64_encoded = base64.b64encode(EMAIL_USERNAME) print("Username Encoded as base64: {}".format(b64_encoded)) client.send(b64_encoded) client.send(b'\r\n') data = client.recv(1024) #print("Server After Username: {}".format(data)) b64_encoded = base64.b64encode(EMAIL_PASSWORD) print("Password Encoded as base64: {}\r\n".format(b64_encoded)) client.send(b64_encoded) client.send(b'\r\n') data = client.recv(1024) print("Server Response: {}".format(data)) def imap_conversation(client): """ Function to test that IMAP service is responding as expected IMAP COMMAND REFERENCE: http://busylog.net/telnet-imap-commands-note/ Currently working to Authentication only - no email is sent params: client - a python Socket Client - can be secure on insecure (conversations don't care about transport) """ print("Let's Start an IMAP Conversation: \r\n") print("Authentication String: {}".format(IMAP_RAND_STRING + IMAP_LOGIN + EMAIL_USERNAME +b' ' + EMAIL_PASSWORD + b' \r\n')) client.send(IMAP_RAND_STRING + IMAP_LOGIN + EMAIL_USERNAME +b' ' + EMAIL_PASSWORD + b'\r\n') data = client.recv(1024) print(data)
tianweizhang/nova
refs/heads/v0
nova/vnc/xvp_proxy.py
16
#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Eventlet WSGI Services to proxy VNC for XCP protocol.""" import socket import eventlet import eventlet.green import eventlet.greenio import eventlet.wsgi from oslo.config import cfg import webob from nova.consoleauth import rpcapi as consoleauth_rpcapi from nova import context from nova.i18n import _ from nova.openstack.common import log as logging from nova import version from nova import wsgi LOG = logging.getLogger(__name__) xvp_proxy_opts = [ cfg.IntOpt('xvpvncproxy_port', default=6081, help='Port that the XCP VNC proxy should bind to'), cfg.StrOpt('xvpvncproxy_host', default='0.0.0.0', help='Address that the XCP VNC proxy should bind to'), ] CONF = cfg.CONF CONF.register_opts(xvp_proxy_opts) class XCPVNCProxy(object): """Class to use the xvp auth protocol to proxy instance vnc consoles.""" def one_way_proxy(self, source, dest): """Proxy tcp connection from source to dest.""" while True: try: d = source.recv(32384) except Exception: d = None # If recv fails, send a write shutdown the other direction if d is None or len(d) == 0: dest.shutdown(socket.SHUT_WR) break # If send fails, terminate proxy in both directions try: # sendall raises an exception on write error, unlike send dest.sendall(d) except Exception: source.close() dest.close() break def handshake(self, req, connect_info, sockets): """Execute hypervisor-specific vnc auth handshaking (if needed).""" host = connect_info['host'] port = int(connect_info['port']) server = eventlet.connect((host, port)) # Handshake as necessary if connect_info.get('internal_access_path'): server.sendall("CONNECT %s HTTP/1.1\r\n\r\n" % connect_info['internal_access_path']) data = "" while True: b = server.recv(1) if b: data += b if data.find("\r\n\r\n") != -1: if not data.split("\r\n")[0].find("200"): LOG.audit(_("Error in handshake: %s"), data) return break if not b or len(data) > 4096: LOG.audit(_("Error in handshake: %s"), data) return client = req.environ['eventlet.input'].get_socket() client.sendall("HTTP/1.1 200 OK\r\n\r\n") sockets['client'] = client sockets['server'] = server def proxy_connection(self, req, connect_info, start_response): """Spawn bi-directional vnc proxy.""" sockets = {} t0 = eventlet.spawn(self.handshake, req, connect_info, sockets) t0.wait() if not sockets.get('client') or not sockets.get('server'): LOG.audit(_("Invalid request: %s"), req) start_response('400 Invalid Request', [('content-type', 'text/html')]) return "Invalid Request" client = sockets['client'] server = sockets['server'] t1 = eventlet.spawn(self.one_way_proxy, client, server) t2 = eventlet.spawn(self.one_way_proxy, server, client) t1.wait() t2.wait() # Make sure our sockets are closed server.close() client.close() def __call__(self, environ, start_response): try: req = webob.Request(environ) LOG.audit(_("Request: %s"), req) token = req.params.get('token') if not token: LOG.audit(_("Request made with missing token: %s"), req) start_response('400 Invalid Request', [('content-type', 'text/html')]) return "Invalid Request" ctxt = context.get_admin_context() api = consoleauth_rpcapi.ConsoleAuthAPI() connect_info = api.check_token(ctxt, token) if not connect_info: LOG.audit(_("Request made with invalid token: %s"), req) start_response('401 Not Authorized', [('content-type', 'text/html')]) return "Not Authorized" return self.proxy_connection(req, connect_info, start_response) except Exception as e: LOG.audit(_("Unexpected error: %s"), e) class SafeHttpProtocol(eventlet.wsgi.HttpProtocol): """HttpProtocol wrapper to suppress IOErrors. The proxy code above always shuts down client connections, so we catch the IOError that raises when the SocketServer tries to flush the connection. """ def finish(self): try: eventlet.green.BaseHTTPServer.BaseHTTPRequestHandler.finish(self) except IOError: pass eventlet.greenio.shutdown_safe(self.connection) self.connection.close() def get_wsgi_server(): LOG.audit(_("Starting nova-xvpvncproxy node (version %s)"), version.version_string_with_package()) return wsgi.Server("XCP VNC Proxy", XCPVNCProxy(), protocol=SafeHttpProtocol, host=CONF.xvpvncproxy_host, port=CONF.xvpvncproxy_port)
fusionbox/django-two-factor-auth
refs/heads/master
two_factor/urls.py
4
from django.conf.urls import patterns, url from two_factor.views import (LoginView, PhoneDeleteView, PhoneSetupView, DisableView, BackupTokensView, SetupCompleteView, SetupView, ProfileView, QRGeneratorView) core = patterns( '', url( regex=r'^account/login/$', view=LoginView.as_view(), name='login', ), url( regex=r'^account/two_factor/setup/$', view=SetupView.as_view(), name='setup', ), url( regex=r'^account/two_factor/qrcode$', view=QRGeneratorView.as_view(), name='qr', ), url( regex=r'^account/two_factor/setup/complete/$', view=SetupCompleteView.as_view(), name='setup_complete', ), url( regex=r'^account/two_factor/backup/tokens/$', view=BackupTokensView.as_view(), name='backup_tokens', ), url( regex=r'^account/two_factor/backup/phone/register/$', view=PhoneSetupView.as_view(), name='phone_create', ), url( regex=r'^account/two_factor/backup/phone/unregister/(?P<pk>\d+)/$', view=PhoneDeleteView.as_view(), name='phone_delete', ), ) profile = patterns( '', url( regex=r'^account/two_factor/$', view=ProfileView.as_view(), name='profile', ), url( regex=r'^account/two_factor/disable/$', view=DisableView.as_view(), name='disable', ), ) urlpatterns = core + profile
NickWoodhams/flask-admin
refs/heads/master
examples/layout/app.py
41
import os import os.path as op from flask import Flask from flask_sqlalchemy import SQLAlchemy import flask_admin as admin from flask_admin.contrib.sqla import ModelView # Create application app = Flask(__name__) # Create dummy secrey key so we can use sessions app.config['SECRET_KEY'] = '123456790' # Create in-memory database app.config['DATABASE_FILE'] = 'sample_db.sqlite' app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE'] app.config['SQLALCHEMY_ECHO'] = True db = SQLAlchemy(app) # Models class User(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.Unicode(64)) email = db.Column(db.Unicode(64)) def __unicode__(self): return self.name class Page(db.Model): id = db.Column(db.Integer, primary_key=True) title = db.Column(db.Unicode(64)) content = db.Column(db.UnicodeText) def __unicode__(self): return self.name # Customized admin interface class CustomView(ModelView): list_template = 'list.html' create_template = 'create.html' edit_template = 'edit.html' class UserAdmin(CustomView): column_searchable_list = ('name',) column_filters = ('name', 'email') # Flask views @app.route('/') def index(): return '<a href="/admin/">Click me to get to Admin!</a>' # Create admin with custom base template admin = admin.Admin(app, 'Example: Layout', base_template='layout.html') # Add views admin.add_view(UserAdmin(User, db.session)) admin.add_view(CustomView(Page, db.session)) def build_sample_db(): """ Populate a small db with some example entries. """ db.drop_all() db.create_all() first_names = [ 'Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie','Sophie', 'Mia', 'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica', 'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy' ] last_names = [ 'Brown', 'Smith', 'Patel', 'Jones', 'Williams', 'Johnson', 'Taylor', 'Thomas', 'Roberts', 'Khan', 'Lewis', 'Jackson', 'Clarke', 'James', 'Phillips', 'Wilson', 'Ali', 'Mason', 'Mitchell', 'Rose', 'Davis', 'Davies', 'Rodriguez', 'Cox', 'Alexander' ] for i in range(len(first_names)): user = User() user.name = first_names[i] + " " + last_names[i] user.email = first_names[i].lower() + "@example.com" db.session.add(user) sample_text = [ { 'title': "de Finibus Bonorum et Malorum - Part I", 'content': "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor \ incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \ exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \ dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. \ Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \ mollit anim id est laborum." }, { 'title': "de Finibus Bonorum et Malorum - Part II", 'content': "Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque \ laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto \ beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur \ aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi \ nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, \ adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam \ aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam \ corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum \ iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum \ qui dolorem eum fugiat quo voluptas nulla pariatur?" }, { 'title': "de Finibus Bonorum et Malorum - Part III", 'content': "At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium \ voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati \ cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id \ est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam \ libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod \ maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. \ Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet \ ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur \ a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis \ doloribus asperiores repellat." } ] for entry in sample_text: page = Page() page.title = entry['title'] page.content = entry['content'] db.session.add(page) db.session.commit() return if __name__ == '__main__': # Build a sample db on the fly, if one does not exist yet. app_dir = op.realpath(os.path.dirname(__file__)) database_path = op.join(app_dir, app.config['DATABASE_FILE']) if not os.path.exists(database_path): build_sample_db() # Start app app.run(debug=True)
CingHu/neutron-ustack
refs/heads/master
neutron/tests/unit/test_extension_pnet.py
3
# Copyright 2013 VMware # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Salvatore Orlando, VMware # import mock from oslo.config import cfg from webob import exc as web_exc import webtest from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import router from neutron import context from neutron.extensions import providernet as pnet from neutron import manager from neutron.openstack.common import uuidutils from neutron import quota from neutron.tests.unit import test_api_v2 from neutron.tests.unit import test_extensions from neutron.tests.unit import testlib_api from neutron.tests.unit import testlib_plugin class ProviderExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return pnet.get_extended_resources(version) class ProvidernetExtensionTestCase(testlib_api.WebTestCase, testlib_plugin.PluginSetupHelper): fmt = 'json' def setUp(self): super(ProvidernetExtensionTestCase, self).setUp() plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2' # Ensure existing ExtensionManager is not used extensions.PluginAwareExtensionManager._instance = None # Save the global RESOURCE_ATTRIBUTE_MAP self.saved_attr_map = {} for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): self.saved_attr_map[resource] = attrs.copy() # Update the plugin and extensions path self.setup_coreplugin(plugin) cfg.CONF.set_override('allow_pagination', True) cfg.CONF.set_override('allow_sorting', True) self._plugin_patcher = mock.patch(plugin, autospec=True) self.plugin = self._plugin_patcher.start() # Ensure Quota checks never fail because of mock instance = self.plugin.return_value instance.get_networks_count.return_value = 1 # Instantiate mock plugin and enable the 'provider' extension manager.NeutronManager.get_plugin().supported_extension_aliases = ( ["provider"]) ext_mgr = ProviderExtensionManager() self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr) self.addCleanup(self._plugin_patcher.stop) self.addCleanup(self._restore_attribute_map) self.api = webtest.TestApp(router.APIRouter()) quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') def _restore_attribute_map(self): # Restore the global RESOURCE_ATTRIBUTE_MAP attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map def _prepare_net_data(self): return {'name': 'net1', pnet.NETWORK_TYPE: 'sometype', pnet.PHYSICAL_NETWORK: 'physnet', pnet.SEGMENTATION_ID: 666} def _put_network_with_provider_attrs(self, ctx, expect_errors=False): data = self._prepare_net_data() env = {'neutron.context': ctx} instance = self.plugin.return_value instance.get_network.return_value = {'tenant_id': ctx.tenant_id, 'shared': False} net_id = uuidutils.generate_uuid() res = self.api.put(test_api_v2._get_path('networks', id=net_id, fmt=self.fmt), self.serialize({'network': data}), extra_environ=env, expect_errors=expect_errors) return res, data, net_id def _post_network_with_provider_attrs(self, ctx, expect_errors=False): data = self._prepare_net_data() env = {'neutron.context': ctx} res = self.api.post(test_api_v2._get_path('networks', fmt=self.fmt), self.serialize({'network': data}), content_type='application/' + self.fmt, extra_environ=env, expect_errors=expect_errors) return res, data def _post_network_with_bad_provider_attrs(self, ctx, bad_data, expect_errors=False): data = self._prepare_net_data() data.update(bad_data) env = {'neutron.context': ctx} res = self.api.post(test_api_v2._get_path('networks', fmt=self.fmt), self.serialize({'network': data}), content_type='application/' + self.fmt, extra_environ=env, expect_errors=expect_errors) return res, data def test_network_create_with_provider_attrs(self): ctx = context.get_admin_context() ctx.tenant_id = 'an_admin' res, data = self._post_network_with_provider_attrs(ctx) instance = self.plugin.return_value exp_input = {'network': data} exp_input['network'].update({'admin_state_up': True, 'tenant_id': 'an_admin', 'shared': False}) instance.create_network.assert_called_with(mock.ANY, network=exp_input) self.assertEqual(res.status_int, web_exc.HTTPCreated.code) def test_network_create_with_bad_provider_attrs_400(self): ctx = context.get_admin_context() ctx.tenant_id = 'an_admin' bad_data = {pnet.SEGMENTATION_ID: "abc"} res, _1 = self._post_network_with_bad_provider_attrs(ctx, bad_data, True) self.assertEqual(web_exc.HTTPBadRequest.code, res.status_int) def test_network_update_with_provider_attrs(self): ctx = context.get_admin_context() ctx.tenant_id = 'an_admin' res, data, net_id = self._put_network_with_provider_attrs(ctx) instance = self.plugin.return_value exp_input = {'network': data} instance.update_network.assert_called_with(mock.ANY, net_id, network=exp_input) self.assertEqual(res.status_int, web_exc.HTTPOk.code) def test_network_create_with_provider_attrs_noadmin_returns_403(self): tenant_id = 'no_admin' ctx = context.Context('', tenant_id, is_admin=False) res, _1 = self._post_network_with_provider_attrs(ctx, True) self.assertEqual(res.status_int, web_exc.HTTPForbidden.code) def test_network_update_with_provider_attrs_noadmin_returns_403(self): tenant_id = 'no_admin' ctx = context.Context('', tenant_id, is_admin=False) res, _1, _2 = self._put_network_with_provider_attrs(ctx, True) self.assertEqual(res.status_int, web_exc.HTTPForbidden.code)
vveerava/Openstack
refs/heads/master
neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py
17
# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Include all tables and make migrations unconditional. Revision ID: db_healing Revises: 5446f2a45467 Create Date: 2014-05-29 10:52:43.898980 """ # revision identifiers, used by Alembic. revision = 'db_healing' down_revision = '5446f2a45467' from neutron.db.migration.alembic_migrations import heal_script def upgrade(): heal_script.heal() def downgrade(): pass
ddsc/ddsc-incron
refs/heads/master
setup.py
1
from setuptools import setup version = '0.1dev' long_description = '\n\n'.join([ open('README.rst').read(), open('CREDITS.rst').read(), open('CHANGES.rst').read(), ]) install_requires = [ 'celery', 'ddsc-logging', 'setuptools', ], tests_require = [ 'coverage', 'nose', ] setup(name='ddsc-incron', version=version, description="DDSC library to be used with the inotify cron system", long_description=long_description, # Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers classifiers=['Programming Language :: Python'], keywords=[], author='Shaoqing Lu', author_email='S.Lu@fugro.nl', url='git@github.com:ddsc/ddsc-incron.git', license='MIT', packages=['ddsc_incron'], include_package_data=True, zip_safe=False, install_requires=install_requires, tests_require=tests_require, extras_require={'test': tests_require}, entry_points={ 'console_scripts': [ 'notify = ddsc_incron.notify:main', ]}, )
akshaya9/fosswebsite
refs/heads/master
attendance/admin.py
2
from django.contrib import admin from attendance.models import DailyAttendance, SSIDName admin.site.register(DailyAttendance) admin.site.register(SSIDName)
CloudBotIRC/CloudBot
refs/heads/master
plugins/minecraft_ping.py
21
import socket from mcstatus import MinecraftServer from cloudbot import hook mc_colors = [('\xa7f', '\x0300'), ('\xa70', '\x0301'), ('\xa71', '\x0302'), ('\xa72', '\x0303'), ('\xa7c', '\x0304'), ('\xa74', '\x0305'), ('\xa75', '\x0306'), ('\xa76', '\x0307'), ('\xa7e', '\x0308'), ('\xa7a', '\x0309'), ('\xa73', '\x0310'), ('\xa7b', '\x0311'), ('\xa71', '\x0312'), ('\xa7d', '\x0313'), ('\xa78', '\x0314'), ('\xa77', '\x0315'), ('\xa7l', '\x02'), ('\xa79', '\x0310'), ('\xa7o', ''), ('\xa7m', '\x13'), ('\xa7r', '\x0f'), ('\xa7n', '\x15')] def format_colors(description): for original, replacement in mc_colors: description = description.replace(original, replacement) return description.replace("\xa7k", "") @hook.command("mcping", "mcp") def mcping(text): """<server[:port]> - gets info about the Minecraft server at <server[:port]>""" try: server = MinecraftServer.lookup(text) except (IOError, ValueError) as e: return e try: s = server.status() except socket.gaierror: return "Invalid hostname" except socket.timeout: return "Request timed out" except ConnectionRefusedError: return "Connection refused" except ConnectionError: return "Connection error" except (IOError, ValueError) as e: return "Error pinging server: {}".format(e) if isinstance(s.description, dict): description = format_colors(" ".join(s.description["text"].split())) else: description = format_colors(" ".join(s.description.split())) # I really hate people for putting colors IN THE VERSION # WTF REALLY THIS IS A THING NOW? if s.latency: return "{}\x0f - \x02{}\x0f - \x02{:.1f}ms\x02" \ " - \x02{}/{}\x02 players".format(description, s.version.name_clean, s.latency, s.players.online, s.players.max).replace("\n", "\x0f - ") else: return "{}\x0f - \x02{}\x0f" \ " - \x02{}/{}\x02 players".format(description, s.version.name_clean, s.players.online, s.players.max).replace("\n", "\x0f - ")
aduchate/bigcouch
refs/heads/master
couchjs/scons/scons-local-2.0.1/SCons/Tool/suncc.py
61
"""SCons.Tool.suncc Tool-specific initialization for Sun Solaris (Forte) CC and cc. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/suncc.py 5134 2010/08/16 23:02:40 bdeegan" import SCons.Util import cc def generate(env): """ Add Builders and construction variables for Forte C and C++ compilers to an Environment. """ cc.generate(env) env['CXX'] = 'CC' env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC') env['SHOBJPREFIX'] = 'so_' env['SHOBJSUFFIX'] = '.o' def exists(env): return env.Detect('CC') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
J861449197/edx-platform
refs/heads/master
common/djangoapps/status/status.py
86
""" A tiny app that checks for a status message. """ from django.conf import settings import logging from .models import GlobalStatusMessage log = logging.getLogger(__name__) def get_site_status_msg(course_key): """ Pull the status message from the database. Caches the message by course. """ try: # The current() value for GlobalStatusMessage is cached. if not GlobalStatusMessage.current().enabled: return None return GlobalStatusMessage.current().full_message(course_key) # Make as general as possible, because something broken here should not # bring down the whole site. except: # pylint: disable=bare-except log.exception("Error while getting a status message.") return None
antoinecarme/pyaf
refs/heads/master
tests/periodicities/Hour/Cycle_Hour_25_H_7.py
1
import tests.periodicities.period_test as per per.buildModel((7 , 'H' , 25));
ChameleonCloud/horizon
refs/heads/chameleoncloud/train
openstack_dashboard/test/integration_tests/pages/admin/overviewpage.py
3
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack_dashboard.test.integration_tests.pages import basepage class OverviewPage(basepage.BaseNavigationPage): def __init__(self, driver, conf): super(OverviewPage, self).__init__(driver, conf) self._page_title = "Overview"
carolFrohlich/nipype
refs/heads/master
nipype/interfaces/dipy/tensors.py
5
# -*- coding: utf-8 -*- """Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from __future__ import print_function, division, unicode_literals, absolute_import import nibabel as nb from ... import logging from ..base import TraitedSpec, File, isdefined from .base import DipyDiffusionInterface, DipyBaseInterfaceInputSpec IFLOGGER = logging.getLogger('interface') class DTIInputSpec(DipyBaseInterfaceInputSpec): mask_file = File(exists=True, desc='An optional white matter mask') class DTIOutputSpec(TraitedSpec): out_file = File(exists=True) fa_file = File(exists=True) md_file = File(exists=True) rd_file = File(exists=True) ad_file = File(exists=True) class DTI(DipyDiffusionInterface): """ Calculates the diffusion tensor model parameters Example ------- >>> import nipype.interfaces.dipy as dipy >>> dti = dipy.DTI() >>> dti.inputs.in_file = 'diffusion.nii' >>> dti.inputs.in_bvec = 'bvecs' >>> dti.inputs.in_bval = 'bvals' >>> dti.run() # doctest: +SKIP """ input_spec = DTIInputSpec output_spec = DTIOutputSpec def _run_interface(self, runtime): from dipy.reconst import dti from dipy.io.utils import nifti1_symmat gtab = self._get_gradient_table() img = nb.load(self.inputs.in_file) data = img.get_data() affine = img.affine mask = None if isdefined(self.inputs.mask_file): mask = nb.load(self.inputs.mask_file).get_data() # Fit it tenmodel = dti.TensorModel(gtab) ten_fit = tenmodel.fit(data, mask) lower_triangular = ten_fit.lower_triangular() img = nifti1_symmat(lower_triangular, affine) out_file = self._gen_filename('dti') nb.save(img, out_file) IFLOGGER.info('DTI parameters image saved as {i}'.format(i=out_file)) #FA MD RD and AD for metric in ["fa", "md", "rd", "ad"]: data = getattr(ten_fit,metric).astype("float32") out_name = self._gen_filename(metric) nb.Nifti1Image(data, affine).to_filename(out_name) IFLOGGER.info('DTI {metric} image saved as {i}'.format(i=out_name, metric=metric)) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = self._gen_filename('dti') for metric in ["fa", "md", "rd", "ad"]: outputs["{}_file".format(metric)] = self._gen_filename(metric) return outputs class TensorModeInputSpec(DipyBaseInterfaceInputSpec): mask_file = File(exists=True, desc='An optional white matter mask') class TensorModeOutputSpec(TraitedSpec): out_file = File(exists=True) class TensorMode(DipyDiffusionInterface): """ Creates a map of the mode of the diffusion tensors given a set of diffusion-weighted images, as well as their associated b-values and b-vectors. Fits the diffusion tensors and calculates tensor mode with Dipy. .. [1] Daniel B. Ennis and G. Kindlmann, "Orthogonal Tensor Invariants and the Analysis of Diffusion Tensor Magnetic Resonance Images", Magnetic Resonance in Medicine, vol. 55, no. 1, pp. 136-146, 2006. Example ------- >>> import nipype.interfaces.dipy as dipy >>> mode = dipy.TensorMode() >>> mode.inputs.in_file = 'diffusion.nii' >>> mode.inputs.in_bvec = 'bvecs' >>> mode.inputs.in_bval = 'bvals' >>> mode.run() # doctest: +SKIP """ input_spec = TensorModeInputSpec output_spec = TensorModeOutputSpec def _run_interface(self, runtime): from dipy.reconst import dti # Load the 4D image files img = nb.load(self.inputs.in_file) data = img.get_data() affine = img.get_affine() # Load the gradient strengths and directions gtab = self._get_gradient_table() # Mask the data so that tensors are not fit for # unnecessary voxels mask = data[..., 0] > 50 # Fit the tensors to the data tenmodel = dti.TensorModel(gtab) tenfit = tenmodel.fit(data, mask) # Calculate the mode of each voxel's tensor mode_data = tenfit.mode # Write as a 3D Nifti image with the original affine img = nb.Nifti1Image(mode_data, affine) out_file = self._gen_filename('mode') nb.save(img, out_file) IFLOGGER.info('Tensor mode image saved as {i}'.format(i=out_file)) return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['out_file'] = self._gen_filename('mode') return outputs
xrg/openerp-server
refs/heads/pg84-next
bin/workflow/__init__.py
76
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import wkf_service #.apidoc title: Workflow objects # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
michaupl/braincloud
refs/heads/master
braincloud/urls.py
1
from django.conf.urls import patterns, include, url from django.contrib import admin from django.contrib.auth.views import login, logout from tastypie.api import Api from brainblog.views import * from brainblog.api import UserResource from brainindex.api import TextThoughtAjaxResource admin.autodiscover() # rest api rest_api = Api(api_name = 'v1') rest_api.register(UserResource()) rest_api.register(TextThoughtAjaxResource()) #rest_api.register(ThoughtResource()) urlpatterns = patterns( '', # thoughts url(r'^thoughts/$', list_thoughts, name="list_thoughts"), url(r'^thoughts/(?P<tag>.+)/$', list_thoughts, name="thoughts_by_tag"), url(r'^view_thought/(?P<id>\w+)$', view_thought, name="view_thought"), url(r'^add/$', add, name="add_thought"), url(r'^edit/(?P<id>\w+)$', edit, name="edit_thought"), url(r'^delete/(?P<id>\w+)$', delete, name="delete_thought"), # cloud url(r'^$', cloud, name="cloud"), # users url(r'^accounts/login/$', login), url(r'^accounts/logout/$', logout, {'next_page': '/'}, name="logout"), url(r'^accounts/register/$', register, name="register"), # admin url(r'^admin/doc/', include('django.contrib.admindocs.urls')), url(r'^admin/', include(admin.site.urls)), # search url(r'^search_results/$', search_thoughts), # rest api url(r'^api/', include(rest_api.urls)), )
mancoast/CPythonPyc_test
refs/heads/master
cpython/244_test_xrange.py
15
# Python test set -- built-in functions import test.test_support, unittest import sys import warnings warnings.filterwarnings("ignore", "integer argument expected", DeprecationWarning, "unittest") class XrangeTest(unittest.TestCase): def test_xrange(self): self.assertEqual(list(xrange(3)), [0, 1, 2]) self.assertEqual(list(xrange(1, 5)), [1, 2, 3, 4]) self.assertEqual(list(xrange(0)), []) self.assertEqual(list(xrange(-3)), []) self.assertEqual(list(xrange(1, 10, 3)), [1, 4, 7]) self.assertEqual(list(xrange(5, -5, -3)), [5, 2, -1, -4]) a = 10 b = 100 c = 50 self.assertEqual(list(xrange(a, a+2)), [a, a+1]) self.assertEqual(list(xrange(a+2, a, -1L)), [a+2, a+1]) self.assertEqual(list(xrange(a+4, a, -2)), [a+4, a+2]) seq = list(xrange(a, b, c)) self.assert_(a in seq) self.assert_(b not in seq) self.assertEqual(len(seq), 2) seq = list(xrange(b, a, -c)) self.assert_(b in seq) self.assert_(a not in seq) self.assertEqual(len(seq), 2) seq = list(xrange(-a, -b, -c)) self.assert_(-a in seq) self.assert_(-b not in seq) self.assertEqual(len(seq), 2) self.assertRaises(TypeError, xrange) self.assertRaises(TypeError, xrange, 1, 2, 3, 4) self.assertRaises(ValueError, xrange, 1, 2, 0) self.assertRaises(OverflowError, xrange, 1e100, 1e101, 1e101) self.assertRaises(TypeError, xrange, 0, "spam") self.assertRaises(TypeError, xrange, 0, 42, "spam") self.assertEqual(len(xrange(0, sys.maxint, sys.maxint-1)), 2) self.assertRaises(OverflowError, xrange, -sys.maxint, sys.maxint) self.assertRaises(OverflowError, xrange, 0, 2*sys.maxint) r = xrange(-sys.maxint, sys.maxint, 2) if sys.maxint > 0x7fffffff: # XXX raising ValueError is less than ideal, but this can't # be fixed until range_length() returns a long in rangeobject.c self.assertRaises(ValueError, len, r) else: self.assertEqual(len(r), sys.maxint) self.assertRaises(OverflowError, xrange, -sys.maxint-1, sys.maxint, 2) def test_main(): test.test_support.run_unittest(XrangeTest) if __name__ == "__main__": test_main()
NixaSoftware/CVis
refs/heads/master
venv/bin/tools/build/v2/test/core_update_now.py
45
#!/usr/bin/python # Copyright 2011 Steven Watanabe # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) import BoostBuild import os def basic(): t = BoostBuild.Tester(pass_toolset=0, pass_d0=False) t.write("file.jam", """\ actions do-print { echo updating $(<) } NOTFILE target1 ; ALWAYS target1 ; do-print target1 ; UPDATE_NOW target1 ; DEPENDS all : target1 ; """) t.run_build_system(["-ffile.jam"], stdout="""\ ...found 1 target... ...updating 1 target... do-print target1 updating target1 ...updated 1 target... ...found 1 target... """) t.cleanup() def ignore_minus_n(): t = BoostBuild.Tester(pass_toolset=0, pass_d0=False) t.write("file.jam", """\ actions do-print { echo updating $(<) } NOTFILE target1 ; ALWAYS target1 ; do-print target1 ; UPDATE_NOW target1 : : ignore-minus-n ; DEPENDS all : target1 ; """) t.run_build_system(["-ffile.jam", "-n"], stdout="""\ ...found 1 target... ...updating 1 target... do-print target1 echo updating target1 updating target1 ...updated 1 target... ...found 1 target... """) t.cleanup() def failed_target(): t = BoostBuild.Tester(pass_toolset=0, pass_d0=False) t.write("file.jam", """\ actions fail { exit 1 } NOTFILE target1 ; ALWAYS target1 ; fail target1 ; actions do-print { echo updating $(<) } NOTFILE target2 ; do-print target2 ; DEPENDS target2 : target1 ; UPDATE_NOW target1 : : ignore-minus-n ; DEPENDS all : target1 target2 ; """) t.run_build_system(["-ffile.jam", "-n"], stdout="""\ ...found 1 target... ...updating 1 target... fail target1 exit 1 ...failed fail target1... ...failed updating 1 target... ...found 2 targets... ...updating 1 target... do-print target2 echo updating target2 ...updated 1 target... """) t.cleanup() def missing_target(): t = BoostBuild.Tester(pass_toolset=0, pass_d0=False) t.write("file.jam", """\ actions do-print { echo updating $(<) } NOTFILE target2 ; do-print target2 ; DEPENDS target2 : target1 ; UPDATE_NOW target1 : : ignore-minus-n ; DEPENDS all : target1 target2 ; """) t.run_build_system(["-ffile.jam", "-n"], status=1, stdout="""\ don't know how to make target1 ...found 1 target... ...can't find 1 target... ...found 2 targets... ...can't make 1 target... """) t.cleanup() def build_once(): """ Make sure that if we call UPDATE_NOW with ignore-minus-n, the target gets updated exactly once regardless of previous calls to UPDATE_NOW with -n in effect. """ t = BoostBuild.Tester(pass_toolset=0, pass_d0=False) t.write("file.jam", """\ actions do-print { echo updating $(<) } NOTFILE target1 ; ALWAYS target1 ; do-print target1 ; UPDATE_NOW target1 ; UPDATE_NOW target1 : : ignore-minus-n ; UPDATE_NOW target1 : : ignore-minus-n ; DEPENDS all : target1 ; """) t.run_build_system(["-ffile.jam", "-n"], stdout="""\ ...found 1 target... ...updating 1 target... do-print target1 echo updating target1 ...updated 1 target... do-print target1 echo updating target1 updating target1 ...updated 1 target... ...found 1 target... """) t.cleanup() def return_status(): """ Make sure that UPDATE_NOW returns a failure status if the target failed in a previous call to UPDATE_NOW """ t = BoostBuild.Tester(pass_toolset=0, pass_d0=False) t.write("file.jam", """\ actions fail { exit 1 } NOTFILE target1 ; ALWAYS target1 ; fail target1 ; ECHO update1: [ UPDATE_NOW target1 ] ; ECHO update2: [ UPDATE_NOW target1 ] ; DEPENDS all : target1 ; """) t.run_build_system(["-ffile.jam"], status=1, stdout="""\ ...found 1 target... ...updating 1 target... fail target1 exit 1 ...failed fail target1... ...failed updating 1 target... update1: update2: ...found 1 target... """) t.cleanup() def save_restore(): """Tests that ignore-minus-n and ignore-minus-q are local to the call to UPDATE_NOW""" t = BoostBuild.Tester(pass_toolset=0, pass_d0=False) t.write("actions.jam", """\ rule fail { NOTFILE $(<) ; ALWAYS $(<) ; } actions fail { exit 1 } rule pass { NOTFILE $(<) ; ALWAYS $(<) ; } actions pass { echo updating $(<) } """) t.write("file.jam", """ include actions.jam ; fail target1 ; fail target2 ; UPDATE_NOW target1 target2 : : $(IGNORE_MINUS_N) : $(IGNORE_MINUS_Q) ; fail target3 ; fail target4 ; UPDATE_NOW target3 target4 ; UPDATE ; """) t.run_build_system(['-n', '-sIGNORE_MINUS_N=1', '-ffile.jam'], stdout='''...found 2 targets... ...updating 2 targets... fail target1 exit 1 ...failed fail target1... fail target2 exit 1 ...failed fail target2... ...failed updating 2 targets... ...found 2 targets... ...updating 2 targets... fail target3 exit 1 fail target4 exit 1 ...updated 2 targets... ''') t.run_build_system(['-q', '-sIGNORE_MINUS_N=1', '-ffile.jam'], status=1, stdout='''...found 2 targets... ...updating 2 targets... fail target1 exit 1 ...failed fail target1... ...failed updating 1 target... ...found 2 targets... ...updating 2 targets... fail target3 exit 1 ...failed fail target3... ...failed updating 1 target... ''') t.run_build_system(['-n', '-sIGNORE_MINUS_Q=1', '-ffile.jam'], stdout='''...found 2 targets... ...updating 2 targets... fail target1 exit 1 fail target2 exit 1 ...updated 2 targets... ...found 2 targets... ...updating 2 targets... fail target3 exit 1 fail target4 exit 1 ...updated 2 targets... ''') t.run_build_system(['-q', '-sIGNORE_MINUS_Q=1', '-ffile.jam'], status=1, stdout='''...found 2 targets... ...updating 2 targets... fail target1 exit 1 ...failed fail target1... fail target2 exit 1 ...failed fail target2... ...failed updating 2 targets... ...found 2 targets... ...updating 2 targets... fail target3 exit 1 ...failed fail target3... ...failed updating 1 target... ''') t.cleanup() basic() ignore_minus_n() failed_target() missing_target() build_once() return_status() save_restore()
awkspace/ansible
refs/heads/devel
lib/ansible/modules/web_infrastructure/apache2_mod_proxy.py
47
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Olivier Boukili <boukili.olivier@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: apache2_mod_proxy author: Olivier Boukili (@oboukili) version_added: "2.2" short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool description: - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member status page has to be enabled and accessible, as this module relies on parsing this page. This module supports ansible check_mode, and requires BeautifulSoup python module. options: balancer_url_suffix: description: - Suffix of the balancer pool url required to access the balancer pool status page (e.g. balancer_vhost[:port]/balancer_url_suffix). default: /balancer-manager/ balancer_vhost: description: - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool. required: true member_host: description: - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to. Port number is autodetected and should not be specified here. If undefined, apache2_mod_proxy module will return a members list of dictionaries of all the current balancer pool members' attributes. state: description: - Desired state of the member host. (absent|disabled),drained,hot_standby,ignore_errors can be simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors). choices: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"] tls: description: - Use https to access balancer management page. type: bool default: 'no' validate_certs: description: - Validate ssl/tls certificates. type: bool default: 'yes' ''' EXAMPLES = ''' # Get all current balancer pool members' attributes: - apache2_mod_proxy: balancer_vhost: 10.0.0.2 # Get a specific member's attributes: - apache2_mod_proxy: balancer_vhost: myws.mydomain.org balancer_suffix: /lb/ member_host: node1.myws.mydomain.org # Enable all balancer pool members: - apache2_mod_proxy: balancer_vhost: '{{ myloadbalancer_host }}' register: result - apache2_mod_proxy: balancer_vhost: '{{ myloadbalancer_host }}' member_host: '{{ item.host }}' state: present with_items: '{{ result.members }}' # Gracefully disable a member from a loadbalancer node: - apache2_mod_proxy: balancer_vhost: '{{ vhost_host }}' member_host: '{{ member.host }}' state: drained delegate_to: myloadbalancernode - wait_for: host: '{{ member.host }}' port: '{{ member.port }}' state: drained delegate_to: myloadbalancernode - apache2_mod_proxy: balancer_vhost: '{{ vhost_host }}' member_host: '{{ member.host }}' state: absent delegate_to: myloadbalancernode ''' RETURN = ''' member: description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter. type: dict returned: success sample: {"attributes": {"Busy": "0", "Elected": "42", "Factor": "1", "From": "136K", "Load": "0", "Route": null, "RouteRedir": null, "Set": "0", "Status": "Init Ok ", "To": " 47K", "Worker URL": null }, "balancer_url": "http://10.10.0.2/balancer-manager/", "host": "10.10.0.20", "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", "path": "/ws", "port": 8080, "protocol": "http", "status": { "disabled": false, "drained": false, "hot_standby": false, "ignore_errors": false } } members: description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args. returned: success type: list sample: [{"attributes": { "Busy": "0", "Elected": "42", "Factor": "1", "From": "136K", "Load": "0", "Route": null, "RouteRedir": null, "Set": "0", "Status": "Init Ok ", "To": " 47K", "Worker URL": null }, "balancer_url": "http://10.10.0.2/balancer-manager/", "host": "10.10.0.20", "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", "path": "/ws", "port": 8080, "protocol": "http", "status": { "disabled": false, "drained": false, "hot_standby": false, "ignore_errors": false } }, {"attributes": { "Busy": "0", "Elected": "42", "Factor": "1", "From": "136K", "Load": "0", "Route": null, "RouteRedir": null, "Set": "0", "Status": "Init Ok ", "To": " 47K", "Worker URL": null }, "balancer_url": "http://10.10.0.2/balancer-manager/", "host": "10.10.0.21", "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", "path": "/ws", "port": 8080, "protocol": "http", "status": { "disabled": false, "drained": false, "hot_standby": false, "ignore_errors": false} } ] ''' import re import traceback BEAUTIFUL_SOUP_IMP_ERR = None try: from BeautifulSoup import BeautifulSoup except ImportError: BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc() HAS_BEAUTIFULSOUP = False else: HAS_BEAUTIFULSOUP = True # balancer member attributes extraction regexp: EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)" # Apache2 server version extraction regexp: APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)" def regexp_extraction(string, _regexp, groups=1): """ Returns the capture group (default=1) specified in the regexp, applied to the string """ regexp_search = re.search(string=str(string), pattern=str(_regexp)) if regexp_search: if regexp_search.group(groups) != '': return str(regexp_search.group(groups)) return None class BalancerMember(object): """ Apache 2.4 mod_proxy LB balancer member. attributes: read-only: host -> member host (string), management_url -> member management url (string), protocol -> member protocol (string) port -> member port (string), path -> member location (string), balancer_url -> url of this member's parent balancer (string), attributes -> whole member attributes (dictionary) module -> ansible module instance (AnsibleModule object). writable: status -> status of the member (dictionary) """ def __init__(self, management_url, balancer_url, module): self.host = regexp_extraction(management_url, str(EXPRESSION), 4) self.management_url = str(management_url) self.protocol = regexp_extraction(management_url, EXPRESSION, 3) self.port = regexp_extraction(management_url, EXPRESSION, 5) self.path = regexp_extraction(management_url, EXPRESSION, 6) self.balancer_url = str(balancer_url) self.module = module def get_member_attributes(self): """ Returns a dictionary of a balancer member's attributes.""" balancer_member_page = fetch_url(self.module, self.management_url) if balancer_member_page[1]['status'] != 200: self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + balancer_member_page[1]) else: try: soup = BeautifulSoup(balancer_member_page[0]) except TypeError: self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(soup)) else: subsoup = soup.findAll('table')[1].findAll('tr') keys = subsoup[0].findAll('th') for valuesset in subsoup[1::1]: if re.search(pattern=self.host, string=str(valuesset)): values = valuesset.findAll('td') return dict((keys[x].string, values[x].string) for x in range(0, len(keys))) def get_member_status(self): """ Returns a dictionary of a balancer member's status attributes.""" status_mapping = {'disabled': 'Dis', 'drained': 'Drn', 'hot_standby': 'Stby', 'ignore_errors': 'Ign'} status = {} actual_status = str(self.attributes['Status']) for mode in status_mapping.keys(): if re.search(pattern=status_mapping[mode], string=actual_status): status[mode] = True else: status[mode] = False return status def set_member_status(self, values): """ Sets a balancer member's status attributes amongst pre-mapped values.""" values_mapping = {'disabled': '&w_status_D', 'drained': '&w_status_N', 'hot_standby': '&w_status_H', 'ignore_errors': '&w_status_I'} request_body = regexp_extraction(self.management_url, EXPRESSION, 1) for k in values_mapping.keys(): if values[str(k)]: request_body = request_body + str(values_mapping[k]) + '=1' else: request_body = request_body + str(values_mapping[k]) + '=0' response = fetch_url(self.module, self.management_url, data=str(request_body)) if response[1]['status'] != 200: self.module.fail_json(msg="Could not set the member status! " + self.host + " " + response[1]['status']) attributes = property(get_member_attributes) status = property(get_member_status, set_member_status) class Balancer(object): """ Apache httpd 2.4 mod_proxy balancer object""" def __init__(self, host, suffix, module, members=None, tls=False): if tls: self.base_url = str(str('https://') + str(host)) self.url = str(str('https://') + str(host) + str(suffix)) else: self.base_url = str(str('http://') + str(host)) self.url = str(str('http://') + str(host) + str(suffix)) self.module = module self.page = self.fetch_balancer_page() if members is None: self._members = [] def fetch_balancer_page(self): """ Returns the balancer management html page as a string for later parsing.""" page = fetch_url(self.module, str(self.url)) if page[1]['status'] != 200: self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status'])) else: content = page[0].read() apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1) if apache_version: if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version): self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version)) return content else: self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager") def get_balancer_members(self): """ Returns members of the balancer as a generator object for later iteration.""" try: soup = BeautifulSoup(self.page) except TypeError: self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page)) else: for element in soup.findAll('a')[1::1]: balancer_member_suffix = str(element.get('href')) if not balancer_member_suffix: self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!") else: yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module) members = property(get_balancer_members) def main(): """ Initiates module.""" module = AnsibleModule( argument_spec=dict( balancer_vhost=dict(required=True, default=None, type='str'), balancer_url_suffix=dict(default="/balancer-manager/", type='str'), member_host=dict(type='str'), state=dict(type='str'), tls=dict(default=False, type='bool'), validate_certs=dict(default=True, type='bool') ), supports_check_mode=True ) if HAS_BEAUTIFULSOUP is False: module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR) if module.params['state'] is not None: states = module.params['state'].split(',') if (len(states) > 1) and (("present" in states) or ("enabled" in states)): module.fail_json(msg="state present/enabled is mutually exclusive with other states!") else: for _state in states: if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']: module.fail_json( msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'." ) else: states = ['None'] mybalancer = Balancer(module.params['balancer_vhost'], module.params['balancer_url_suffix'], module=module, tls=module.params['tls']) if module.params['member_host'] is None: json_output_list = [] for member in mybalancer.members: json_output_list.append({ "host": member.host, "status": member.status, "protocol": member.protocol, "port": member.port, "path": member.path, "attributes": member.attributes, "management_url": member.management_url, "balancer_url": member.balancer_url }) module.exit_json( changed=False, members=json_output_list ) else: changed = False member_exists = False member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False} for mode in member_status.keys(): for state in states: if mode == state: member_status[mode] = True elif mode == 'disabled' and state == 'absent': member_status[mode] = True for member in mybalancer.members: if str(member.host) == str(module.params['member_host']): member_exists = True if module.params['state'] is not None: member_status_before = member.status if not module.check_mode: member_status_after = member.status = member_status else: member_status_after = member_status if member_status_before != member_status_after: changed = True json_output = { "host": member.host, "status": member.status, "protocol": member.protocol, "port": member.port, "path": member.path, "attributes": member.attributes, "management_url": member.management_url, "balancer_url": member.balancer_url } if member_exists: module.exit_json( changed=changed, member=json_output ) else: module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!') from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.urls import fetch_url if __name__ == '__main__': main()
pluckljn/paimei
refs/heads/master
console/modules/_PAIMEIexplorer/ExplorerTreeCtrl.py
6
# # PaiMei # Copyright (C) 2006 Pedram Amini <pedram.amini@gmail.com> # # $Id: ExplorerTreeCtrl.py 193 2007-04-05 13:30:01Z cameron $ # # This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with this program; if not, write to the Free # Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # ''' @author: Pedram Amini @license: GNU General Public License 2.0 or later @contact: pedram.amini@gmail.com @organization: www.openrce.org ''' import wx import re import MySQLdb import pida class ExplorerTreeCtrl (wx.TreeCtrl): ''' Our custom tree control. ''' def __init__ (self, parent, id, pos=None, size=None, style=None, top=None): wx.TreeCtrl.__init__(self, parent, id, pos, size, style) self.top = top self.selected = None self.used_for_stalk = None # setup our custom tree list control. self.icon_list = wx.ImageList(16, 16) self.icon_folder = self.icon_list.Add(wx.ArtProvider_GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, (16, 16))) self.icon_folder_open = self.icon_list.Add(wx.ArtProvider_GetBitmap(wx.ART_FOLDER_OPEN, wx.ART_OTHER, (16, 16))) self.icon_tag = self.icon_list.Add(wx.ArtProvider_GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, (16, 16))) self.icon_selected = self.icon_list.Add(wx.ArtProvider_GetBitmap(wx.ART_FIND, wx.ART_OTHER, (16, 16))) self.icon_filtered = self.icon_list.Add(wx.ArtProvider_GetBitmap(wx.ART_CUT, wx.ART_OTHER, (16, 16))) self.SetImageList(self.icon_list) self.root = self.AddRoot("Modules") self.SetPyData(self.root, None) self.SetItemImage(self.root, self.icon_folder, wx.TreeItemIcon_Normal) self.SetItemImage(self.root, self.icon_folder_open, wx.TreeItemIcon_Expanded) #################################################################################################################### def on_item_activated (self, event): ''' Make record of the selected target/tag combination. ''' if not self.selected: return selected = self.GetPyData(self.selected) # module selected. if type(selected) == pida.module: pass # function selected. elif type(selected) == pida.function: disasm = """ <html> <body text=#eeeeee bgcolor=#000000> <font size=4><b>%s</b></font> <font face=courier size=2> """ % (selected.name) for bb in selected.sorted_nodes(): disasm += "<p>" # chunked block. if selected.ea_start > bb.ea_start > selected.ea_end: disasm += "<font color=blue>CHUNKED BLOCK --------------------</font><br>" for ins in bb.sorted_instructions(): ins_disasm = ins.disasm ins_disasm = re.sub("(?P<op>^j..?)\s", "<font color=yellow>\g<op> </font>", ins_disasm) ins_disasm = re.sub("(?P<op>^call)\s", "<font color=red>\g<op> </font>", ins_disasm) disasm += "<font color=#999999>%08x</font>&nbsp;&nbsp;%s<br>" % (ins.ea, ins_disasm) disasm += "</font></body></html>" self.top.disassembly.SetPage(disasm) # basic block selected. elif type(selected) == pida.basic_block: pass #################################################################################################################### def on_item_right_click (self, event): if not self.selected: return if not self.x or not self.y: return selected = self.GetPyData(self.selected) ### ### root node. ### if selected == None: return ### ### module node. ### elif type(selected) == pida.module: # we only have to do this once, that is what the hasattr() check is for. if not hasattr(self, "right_click_popup_remove_module"): self.right_click_popup_remove_module = wx.NewId() self.Bind(wx.EVT_MENU, self.on_right_click_popup_remove_module, id=self.right_click_popup_remove_module) # make a menu. menu = wx.Menu() menu.Append(self.right_click_popup_remove_module, "Remove Module") self.PopupMenu(menu, (self.x, self.y)) menu.Destroy() ### ### function node. ### elif type(selected) == pida.function: # we only have to do this once, that is what the hasattr() check is for. if not hasattr(self, "right_click_popup_graph_function"): self.right_click_popup_graph_function = wx.NewId() self.Bind(wx.EVT_MENU, self.on_right_click_popup_graph_function, id=self.right_click_popup_graph_function) # make a menu. menu = wx.Menu() menu.Append(self.right_click_popup_graph_function, "Graph Function") self.PopupMenu(menu, (self.x, self.y)) menu.Destroy() ### ### basic block node. ### elif type(selected) == pida.function: return #################################################################################################################### def on_item_right_down (self, event): ''' Grab the x/y coordinates when the right mouse button is clicked. ''' self.x = event.GetX() self.y = event.GetY() item, flags = self.HitTest((self.x, self.y)) if flags & wx.TREE_HITTEST_ONITEM: self.SelectItem(item) else: self.x = None self.y = None #################################################################################################################### def on_item_sel_changed (self, event): ''' Update the current selected tree control item on every selection change. ''' self.selected = event.GetItem() #################################################################################################################### def load_module (self, module_name): ''' Load the specified module into the tree. ''' tree_module = self.AppendItem(self.root, module_name) self.SetPyData(tree_module, self.top.pida_modules[module_name]) self.SetItemImage(tree_module, self.icon_folder, wx.TreeItemIcon_Normal) self.SetItemImage(tree_module, self.icon_folder_open, wx.TreeItemIcon_Expanded) sorted_functions = [f.ea_start for f in self.top.pida_modules[module_name].nodes.values() if not f.is_import] sorted_functions.sort() for func_key in sorted_functions: function = self.top.pida_modules[module_name].nodes[func_key] tree_function = self.AppendItem(tree_module, "%08x - %s" % (function.ea_start, function.name)) self.SetPyData(tree_function, self.top.pida_modules[module_name].nodes[func_key]) self.SetItemImage(tree_function, self.icon_folder, wx.TreeItemIcon_Normal) self.SetItemImage(tree_function, self.icon_folder_open, wx.TreeItemIcon_Expanded) sorted_bbs = function.nodes.keys() sorted_bbs.sort() for bb_key in sorted_bbs: bb = function.nodes[bb_key] tree_bb = self.AppendItem(tree_function, "%08x" % bb.ea_start) self.SetPyData(tree_bb, function.nodes[bb_key]) self.SetItemImage(tree_bb, self.icon_tag, wx.TreeItemIcon_Normal) self.Expand(self.root) #################################################################################################################### def on_right_click_popup_graph_function (self, event): ''' Right click event handler for popup add graph function menu selection. ''' if not self.selected: return selected = self.GetPyData(self.selected) udraw = self.top.main_frame.udraw if not udraw: self.top.err("No available connection to uDraw(Graph) server.") return try: udraw.graph_new(selected) except: self.top.main_frame.udraw = None self.top.err("Connection to uDraw(Graph) server severed.") #################################################################################################################### def on_right_click_popup_remove_module (self, event): ''' Right click event handler for popup add remove module menu selection. ''' if not self.selected: return self.DeleteChildren(self.selected) self.Delete(self.selected)
tobias47n9e/social-core
refs/heads/master
social_core/tests/backends/test_azuread.py
5
""" Copyright (c) 2015 Microsoft Open Technologies, Inc. All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import json from .oauth import OAuth2Test class AzureADOAuth2Test(OAuth2Test): backend_path = 'social_core.backends.azuread.AzureADOAuth2' user_data_url = 'https://graph.windows.net/me' expected_username = 'foobar' access_token_body = json.dumps({ 'access_token': 'foobar', 'token_type': 'bearer', 'id_token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJodHRwczovL' '3N0cy53aW5kb3dzLm5ldC83Mjc0MDZhYy03MDY4LTQ4ZmEtOTJiOS1jMmQ' '2NzIxMWJjNTAvIiwiaWF0IjpudWxsLCJleHAiOm51bGwsImF1ZCI6IjAyO' 'WNjMDEwLWJiNzQtNGQyYi1hMDQwLWY5Y2VkM2ZkMmM3NiIsInN1YiI6In' 'FVOHhrczltSHFuVjZRMzR6aDdTQVpvY2loOUV6cnJJOW1wVlhPSWJWQTg' 'iLCJ2ZXIiOiIxLjAiLCJ0aWQiOiI3Mjc0MDZhYy03MDY4LTQ4ZmEtOTJi' 'OS1jMmQ2NzIxMWJjNTAiLCJvaWQiOiI3ZjhlMTk2OS04YjgxLTQzOGMtO' 'GQ0ZS1hZDZmNTYyYjI4YmIiLCJ1cG4iOiJmb29iYXJAdGVzdC5vbm1pY3' 'Jvc29mdC5jb20iLCJnaXZlbl9uYW1lIjoiZm9vIiwiZmFtaWx5X25hbWU' 'iOiJiYXIiLCJuYW1lIjoiZm9vIGJhciIsInVuaXF1ZV9uYW1lIjoiZm9v' 'YmFyQHRlc3Qub25taWNyb3NvZnQuY29tIiwicHdkX2V4cCI6IjQ3MzMwO' 'TY4IiwicHdkX3VybCI6Imh0dHBzOi8vcG9ydGFsLm1pY3Jvc29mdG9ubG' 'luZS5jb20vQ2hhbmdlUGFzc3dvcmQuYXNweCJ9.3V50dHXTZOHj9UWtkn' '2g7BjX5JxNe8skYlK4PdhiLz4', 'expires_in': 3600, 'expires_on': 1423650396, 'not_before': 1423646496 }) refresh_token_body = json.dumps({ 'access_token': 'foobar-new-token', 'token_type': 'bearer', 'expires_in': 3600, 'refresh_token': 'foobar-new-refresh-token', 'scope': 'identity' }) def test_login(self): self.do_login() def test_partial_pipeline(self): self.do_partial_pipeline() def test_refresh_token(self): user, social = self.do_refresh_token() self.assertEqual(social.extra_data['access_token'], 'foobar-new-token')
pneerincx/easybuild-framework
refs/heads/master
easybuild/tools/toolchain/toolchainvariables.py
9
# # # Copyright 2012-2015 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en), # the Hercules foundation (http://www.herculesstichting.be/in_English) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # http://github.com/hpcugent/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. # # """ Toolchain specific variables @author: Stijn De Weirdt (Ghent University) @author: Kenneth Hoste (Ghent University) """ from easybuild.tools.variables import Variables, join_map_class from easybuild.tools.toolchain.constants import ALL_MAP_CLASSES from easybuild.tools.toolchain.variables import LinkerFlagList, FlagList class ToolchainVariables(Variables): """ Class to hold variable-like key/value pairs in context of compilers (i.e. the generated string are e.g. compiler options or link flags) """ MAP_CLASS = join_map_class(ALL_MAP_CLASSES) # join_map_class strips explanation DEFAULT_CLASS = FlagList LINKER_TOGGLE_START_STOP_GROUP = None LINKER_TOGGLE_STATIC_DYNAMIC = None def add_begin_end_linkerflags(self, lib, toggle_startstopgroup=False, toggle_staticdynamic=False): """ For given lib if toggle_startstopgroup: toggle begin/end group if toggle_staticdynamic: toggle static/dynamic """ class LFL(LinkerFlagList): LINKER_TOGGLE_START_STOP_GROUP = self.LINKER_TOGGLE_START_STOP_GROUP LINKER_TOGGLE_STATIC_DYNAMIC = self.LINKER_TOGGLE_STATIC_DYNAMIC def make_lfl(begin=True): """make linkerflaglist for begin/end of library""" lfl = LFL() if toggle_startstopgroup: if begin: lfl.toggle_startgroup() else: lfl.toggle_stopgroup() if toggle_staticdynamic: if begin: lfl.toggle_static() else: lfl.toggle_dynamic() return lfl if lib is not None: lib.BEGIN = make_lfl(True) lib.BEGIN.IS_BEGIN = True lib.END = make_lfl(False) lib.END.IS_END = True
mKeRix/home-assistant
refs/heads/dev
tests/components/yamaha/__init__.py
36
"""Tests for the yamaha component."""
fabaff/ansible
refs/heads/devel
lib/ansible/new_inventory/__init__.py
53
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re import sys from ansible import constants as C from ansible.inventory.group import Group from .host import Host from ansible.plugins.inventory.aggregate import InventoryAggregateParser from ansible import errors class Inventory: ''' Create hosts and groups from inventory Retrieve the hosts and groups that ansible knows about from this class. Retrieve raw variables (non-expanded) from the Group and Host classes returned from here. ''' def __init__(self, inventory_list=C.DEFAULT_HOST_LIST): ''' :kwarg inventory_list: A list of inventory sources. This may be file names which will be parsed as ini-like files, executable scripts which return inventory data as json, directories of both of the above, or hostnames. Files and directories are :kwarg vault_password: Password to use if any of the inventory sources are in an ansible vault ''' self._restricted_to = None self._filter_pattern = None parser = InventoryAggregateParser(inventory_list) parser.parse() self._basedir = parser.basedir self._hosts = parser.hosts self._groups = parser.groups def get_hosts(self): ''' Return the list of hosts, after filtering based on any set pattern and restricting the results based on the set host restrictions. ''' if self._filter_pattern: hosts = self._filter_hosts() else: hosts = self._hosts[:] if self._restricted_to is not None: # this will preserve the order of hosts after intersecting them res_set = set(hosts).intersection(self._restricted_to) return [h for h in hosts if h in res_set] else: return hosts[:] def get_groups(self): ''' Retrieve the Group objects known to the Inventory ''' return self._groups[:] def get_host(self, hostname): ''' Retrieve the Host object for a hostname ''' for host in self._hosts: if host.name == hostname: return host return None def get_group(self, groupname): ''' Retrieve the Group object for a groupname ''' for group in self._groups: if group.name == groupname: return group return None def add_group(self, group): ''' Add a new group to the inventory ''' if group not in self._groups: self._groups.append(group) def set_filter_pattern(self, pattern='all'): ''' Sets a pattern upon which hosts/groups will be filtered. This pattern can contain logical groupings such as unions, intersections and negations using special syntax. ''' self._filter_pattern = pattern def set_host_restriction(self, restriction): ''' Restrict operations to hosts in the given list ''' assert isinstance(restriction, list) self._restricted_to = restriction[:] def remove_host_restriction(self): ''' Remove the restriction on hosts, if any. ''' self._restricted_to = None def _filter_hosts(self): """ Limits inventory results to a subset of inventory that matches a given list of patterns, such as to select a subset of a hosts selection that also belongs to a certain geographic group or numeric slice. Corresponds to --limit parameter to ansible-playbook :arg patterns: The pattern to limit with. If this is None it clears the subset. Multiple patterns may be specified as a comma, semicolon, or colon separated string. """ hosts = [] pattern_regular = [] pattern_intersection = [] pattern_exclude = [] patterns = self._pattern.replace(";",":").split(":") for p in patterns: if p.startswith("!"): pattern_exclude.append(p) elif p.startswith("&"): pattern_intersection.append(p) elif p: pattern_regular.append(p) # if no regular pattern was given, hence only exclude and/or intersection # make that magically work if pattern_regular == []: pattern_regular = ['all'] # when applying the host selectors, run those without the "&" or "!" # first, then the &s, then the !s. patterns = pattern_regular + pattern_intersection + pattern_exclude for p in patterns: intersect = False negate = False if p.startswith('&'): intersect = True elif p.startswith('!'): p = p[1:] negate = True target = self._resolve_pattern(p) if isinstance(target, Host): if negate and target in hosts: # remove it hosts.remove(target) elif target not in hosts: # for both union and intersections, we just append it hosts.append(target) else: if intersect: hosts = [ h for h in hosts if h not in target ] elif negate: hosts = [ h for h in hosts if h in target ] else: to_append = [ h for h in target if h.name not in [ y.name for y in hosts ] ] hosts.extend(to_append) return hosts def _resolve_pattern(self, pattern): target = self.get_host(pattern) if target: return target else: (name, enumeration_details) = self._enumeration_info(pattern) hpat = self._hosts_in_unenumerated_pattern(name) result = self._apply_ranges(pattern, hpat) return result def _enumeration_info(self, pattern): """ returns (pattern, limits) taking a regular pattern and finding out which parts of it correspond to start/stop offsets. limits is a tuple of (start, stop) or None """ # Do not parse regexes for enumeration info if pattern.startswith('~'): return (pattern, None) # The regex used to match on the range, which can be [x] or [x-y]. pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$") m = pattern_re.match(pattern) if m: (target, first, last, rest) = m.groups() first = int(first) if last: if first < 0: raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range") last = int(last) else: last = first return (target, (first, last)) else: return (pattern, None) def _apply_ranges(self, pat, hosts): """ given a pattern like foo, that matches hosts, return all of hosts given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts """ # If there are no hosts to select from, just return the # empty set. This prevents trying to do selections on an empty set. # issue#6258 if not hosts: return hosts (loose_pattern, limits) = self._enumeration_info(pat) if not limits: return hosts (left, right) = limits if left == '': left = 0 if right == '': right = 0 left=int(left) right=int(right) try: if left != right: return hosts[left:right] else: return [ hosts[left] ] except IndexError: raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat) def _hosts_in_unenumerated_pattern(self, pattern): """ Get all host names matching the pattern """ results = [] hosts = [] hostnames = set() # ignore any negative checks here, this is handled elsewhere pattern = pattern.replace("!","").replace("&", "") def __append_host_to_results(host): if host not in results and host.name not in hostnames: hostnames.add(host.name) results.append(host) groups = self.get_groups() for group in groups: if pattern == 'all': for host in group.get_hosts(): __append_host_to_results(host) else: if self._match(group.name, pattern): for host in group.get_hosts(): __append_host_to_results(host) else: matching_hosts = self._match_list(group.get_hosts(), 'name', pattern) for host in matching_hosts: __append_host_to_results(host) if pattern in ["localhost", "127.0.0.1"] and len(results) == 0: new_host = self._create_implicit_localhost(pattern) results.append(new_host) return results def _create_implicit_localhost(self, pattern): new_host = Host(pattern) new_host._connection = 'local' new_host.set_variable("ansible_python_interpreter", sys.executable) ungrouped = self.get_group("ungrouped") if ungrouped is None: self.add_group(Group('ungrouped')) ungrouped = self.get_group('ungrouped') self.get_group('all').add_child_group(ungrouped) ungrouped.add_host(new_host) return new_host def is_file(self): ''' Did inventory come from a file? :returns: True if the inventory is file based, False otherwise ''' pass def src(self): ''' What's the complete path to the inventory file? :returns: Complete path to the inventory file. None if inventory is not file-based ''' pass def basedir(self): ''' What directory from which the inventory was read. ''' return self._basedir
timwee/emacs-starter-kit-mr-flip-forked
refs/heads/master
vendor/rope/rope/base/resources.py
58
import os import re import rope.base.change import rope.base.fscommands from rope.base import exceptions class Resource(object): """Represents files and folders in a project""" def __init__(self, project, path): self.project = project self._path = path def move(self, new_location): """Move resource to `new_location`""" self._perform_change(rope.base.change.MoveResource(self, new_location), 'Moving <%s> to <%s>' % (self.path, new_location)) def remove(self): """Remove resource from the project""" self._perform_change(rope.base.change.RemoveResource(self), 'Removing <%s>' % self.path) def is_folder(self): """Return true if the resource is a folder""" def create(self): """Create this resource""" def exists(self): return os.path.exists(self.real_path) @property def parent(self): parent = '/'.join(self.path.split('/')[0:-1]) return self.project.get_folder(parent) @property def path(self): """Return the path of this resource relative to the project root The path is the list of parent directories separated by '/' followed by the resource name. """ return self._path @property def name(self): """Return the name of this resource""" return self.path.split('/')[-1] @property def real_path(self): """Return the file system path of this resource""" return self.project._get_resource_path(self.path) def __eq__(self, obj): return self.__class__ == obj.__class__ and self.path == obj.path def __ne__(self, obj): return not self.__eq__(obj) def __hash__(self): return hash(self.path) def _perform_change(self, change_, description): changes = rope.base.change.ChangeSet(description) changes.add_change(change_) self.project.do(changes) class File(Resource): """Represents a file""" def __init__(self, project, name): super(File, self).__init__(project, name) def read(self): data = self.read_bytes() try: return rope.base.fscommands.file_data_to_unicode(data) except UnicodeDecodeError, e: raise exceptions.ModuleDecodeError(self.path, e.reason) def read_bytes(self): return open(self.real_path, 'rb').read() def write(self, contents): try: if contents == self.read(): return except IOError: pass self._perform_change(rope.base.change.ChangeContents(self, contents), 'Writing file <%s>' % self.path) def is_folder(self): return False def create(self): self.parent.create_file(self.name) class Folder(Resource): """Represents a folder""" def __init__(self, project, name): super(Folder, self).__init__(project, name) def is_folder(self): return True def get_children(self): """Return the children of this folder""" result = [] for name in os.listdir(self.real_path): try: child = self.get_child(name) except exceptions.ResourceNotFoundError: continue if not self.project.is_ignored(child): result.append(self.get_child(name)) return result def create_file(self, file_name): self._perform_change( rope.base.change.CreateFile(self, file_name), 'Creating file <%s>' % self._get_child_path(file_name)) return self.get_child(file_name) def create_folder(self, folder_name): self._perform_change( rope.base.change.CreateFolder(self, folder_name), 'Creating folder <%s>' % self._get_child_path(folder_name)) return self.get_child(folder_name) def _get_child_path(self, name): if self.path: return self.path + '/' + name else: return name def get_child(self, name): return self.project.get_resource(self._get_child_path(name)) def has_child(self, name): try: self.get_child(name) return True except exceptions.ResourceNotFoundError: return False def get_files(self): return [resource for resource in self.get_children() if not resource.is_folder()] def get_folders(self): return [resource for resource in self.get_children() if resource.is_folder()] def contains(self, resource): if self == resource: return False return self.path == '' or resource.path.startswith(self.path + '/') def create(self): self.parent.create_folder(self.name) class _ResourceMatcher(object): def __init__(self): self.patterns = [] self._compiled_patterns = [] def set_patterns(self, patterns): """Specify which resources to match `patterns` is a `list` of `str`\s that can contain ``*`` and ``?`` signs for matching resource names. """ self._compiled_patterns = None self.patterns = patterns def _add_pattern(self, pattern): re_pattern = pattern.replace('.', '\\.').\ replace('*', '[^/]*').replace('?', '[^/]').\ replace('//', '/(.*/)?') re_pattern = '^(.*/)?' + re_pattern + '(/.*)?$' self.compiled_patterns.append(re.compile(re_pattern)) def does_match(self, resource): for pattern in self.compiled_patterns: if pattern.match(resource.path): return True path = os.path.join(resource.project.address, *resource.path.split('/')) if os.path.islink(path): return True return False @property def compiled_patterns(self): if self._compiled_patterns is None: self._compiled_patterns = [] for pattern in self.patterns: self._add_pattern(pattern) return self._compiled_patterns
abhilashnta/edx-platform
refs/heads/master
cms/djangoapps/contentstore/features/course_import.py
176
# pylint: disable=missing-docstring # pylint: disable=redefined-outer-name # pylint: disable=unused-argument import os from lettuce import world, step from django.conf import settings def import_file(filename): world.browser.execute_script("$('input.file-input').css('display', 'block')") path = os.path.join(settings.COMMON_TEST_DATA_ROOT, "imports", filename) world.browser.attach_file('course-data', os.path.abspath(path)) world.css_click('input.submit-button') # Go to course outline world.click_course_content() outline_css = 'li.nav-course-courseware-outline a' world.css_click(outline_css) @step('I go to the import page$') def go_to_import(step): menu_css = 'li.nav-course-tools' import_css = 'li.nav-course-tools-import a' world.css_click(menu_css) world.css_click(import_css)
fernandojvdasilva/centering_py
refs/heads/master
src/corpus/Sentence.py
1
''' Created on 03/11/2009 @author: Fernando Copyright 2009-2013 Fernando J. V. da Silva This file is part of centering_py. centering_py is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. centering_py is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with centering_py. If not, see <http://www.gnu.org/licenses/>. ''' from corpus.rst.RST_Node import * from anaphor_resolution.Centering_Elements import * class Sentence(RST_Node, Un): ''' This class holds the words of a sentence ''' def __init__(self, words=None): ''' Constructor ''' RST_Node.__init__(self) Un.__init__(self) #set of Word objects which constitutes the sentence if words != None: self.words = words else: self.words = [] def copy(self): result = Sentence() for word in self.words: result.words.append(word.copy(result)) for word in self.vein: result.vein.append(word.copy(result)) for word in self.head: result.head.append(word.copy(result)) for word in self.label: result.label.append(word.copy(result)) result.index = self.index return result def asString(self): result = '' for word in self.words: result = result + word.properties['text'] + ' ' return result
alexsanjoseph/duolingo-save-streak
refs/heads/master
werkzeug/datastructures.py
56
# -*- coding: utf-8 -*- """ werkzeug.datastructures ~~~~~~~~~~~~~~~~~~~~~~~ This module provides mixins and classes with an immutable interface. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import codecs import mimetypes from copy import deepcopy from itertools import repeat from collections import Container, Iterable, MutableSet from werkzeug._internal import _missing, _empty_stream from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \ PY2, text_type, integer_types, string_types, make_literal_wrapper, \ to_native from werkzeug.filesystem import get_filesystem_encoding _locale_delim_re = re.compile(r'[_-]') def is_immutable(self): raise TypeError('%r objects are immutable' % self.__class__.__name__) def iter_multi_items(mapping): """Iterates over the items of a mapping yielding keys and values without dropping any from more complex structures. """ if isinstance(mapping, MultiDict): for item in iteritems(mapping, multi=True): yield item elif isinstance(mapping, dict): for key, value in iteritems(mapping): if isinstance(value, (tuple, list)): for value in value: yield key, value else: yield key, value else: for item in mapping: yield item def native_itermethods(names): if not PY2: return lambda x: x def setviewmethod(cls, name): viewmethod_name = 'view%s' % name viewmethod = lambda self, *a, **kw: ViewItems(self, name, 'view_%s' % name, *a, **kw) viewmethod.__doc__ = \ '"""`%s()` object providing a view on %s"""' % (viewmethod_name, name) setattr(cls, viewmethod_name, viewmethod) def setitermethod(cls, name): itermethod = getattr(cls, name) setattr(cls, 'iter%s' % name, itermethod) listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw)) listmethod.__doc__ = \ 'Like :py:meth:`iter%s`, but returns a list.' % name setattr(cls, name, listmethod) def wrap(cls): for name in names: setitermethod(cls, name) setviewmethod(cls, name) return cls return wrap class ImmutableListMixin(object): """Makes a :class:`list` immutable. .. versionadded:: 0.5 :private: """ _hash_cache = None def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(tuple(self)) return rv def __reduce_ex__(self, protocol): return type(self), (list(self),) def __delitem__(self, key): is_immutable(self) def __iadd__(self, other): is_immutable(self) __imul__ = __iadd__ def __setitem__(self, key, value): is_immutable(self) def append(self, item): is_immutable(self) remove = append def extend(self, iterable): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def reverse(self): is_immutable(self) def sort(self, cmp=None, key=None, reverse=None): is_immutable(self) class ImmutableList(ImmutableListMixin, list): """An immutable :class:`list`. .. versionadded:: 0.5 :private: """ def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, list.__repr__(self), ) class ImmutableDictMixin(object): """Makes a :class:`dict` immutable. .. versionadded:: 0.5 :private: """ _hash_cache = None @classmethod def fromkeys(cls, keys, value=None): instance = super(cls, cls).__new__(cls) instance.__init__(zip(keys, repeat(value))) return instance def __reduce_ex__(self, protocol): return type(self), (dict(self),) def _iter_hashitems(self): return iteritems(self) def __hash__(self): if self._hash_cache is not None: return self._hash_cache rv = self._hash_cache = hash(frozenset(self._iter_hashitems())) return rv def setdefault(self, key, default=None): is_immutable(self) def update(self, *args, **kwargs): is_immutable(self) def pop(self, key, default=None): is_immutable(self) def popitem(self): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) def __delitem__(self, key): is_immutable(self) def clear(self): is_immutable(self) class ImmutableMultiDictMixin(ImmutableDictMixin): """Makes a :class:`MultiDict` immutable. .. versionadded:: 0.5 :private: """ def __reduce_ex__(self, protocol): return type(self), (list(iteritems(self, multi=True)),) def _iter_hashitems(self): return iteritems(self, multi=True) def add(self, key, value): is_immutable(self) def popitemlist(self): is_immutable(self) def poplist(self, key): is_immutable(self) def setlist(self, key, new_list): is_immutable(self) def setlistdefault(self, key, default_list=None): is_immutable(self) class UpdateDictMixin(object): """Makes dicts call `self.on_update` on modifications. .. versionadded:: 0.5 :private: """ on_update = None def calls_update(name): def oncall(self, *args, **kw): rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw) if self.on_update is not None: self.on_update(self) return rv oncall.__name__ = name return oncall def setdefault(self, key, default=None): modified = key not in self rv = super(UpdateDictMixin, self).setdefault(key, default) if modified and self.on_update is not None: self.on_update(self) return rv def pop(self, key, default=_missing): modified = key in self if default is _missing: rv = super(UpdateDictMixin, self).pop(key) else: rv = super(UpdateDictMixin, self).pop(key, default) if modified and self.on_update is not None: self.on_update(self) return rv __setitem__ = calls_update('__setitem__') __delitem__ = calls_update('__delitem__') clear = calls_update('clear') popitem = calls_update('popitem') update = calls_update('update') del calls_update class TypeConversionDict(dict): """Works like a regular dict but the :meth:`get` method can perform type conversions. :class:`MultiDict` and :class:`CombinedMultiDict` are subclasses of this class and provide the same feature. .. versionadded:: 0.5 """ def get(self, key, default=None, type=None): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = TypeConversionDict(foo='42', bar='blub') >>> d.get('foo', type=int) 42 >>> d.get('bar', -1, type=int) -1 :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the default value is returned. """ try: rv = self[key] if type is not None: rv = type(rv) except (KeyError, ValueError): rv = default return rv class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict): """Works like a :class:`TypeConversionDict` but does not support modifications. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return TypeConversionDict(self) def __copy__(self): return self class ViewItems(object): def __init__(self, multi_dict, method, repr_name, *a, **kw): self.__multi_dict = multi_dict self.__method = method self.__repr_name = repr_name self.__a = a self.__kw = kw def __get_items(self): return getattr(self.__multi_dict, self.__method)(*self.__a, **self.__kw) def __repr__(self): return '%s(%r)' % (self.__repr_name, list(self.__get_items())) def __iter__(self): return iter(self.__get_items()) @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class MultiDict(TypeConversionDict): """A :class:`MultiDict` is a dictionary subclass customized to deal with multiple values for the same key which is for example used by the parsing functions in the wrappers. This is necessary because some HTML form elements pass multiple values for the same key. :class:`MultiDict` implements all standard dictionary methods. Internally, it saves all values for a key as a list, but the standard dict access methods will only return the first value for a key. If you want to gain access to the other values, too, you have to use the `list` methods as explained below. Basic Usage: >>> d = MultiDict([('a', 'b'), ('a', 'c')]) >>> d MultiDict([('a', 'b'), ('a', 'c')]) >>> d['a'] 'b' >>> d.getlist('a') ['b', 'c'] >>> 'a' in d True It behaves like a normal dict thus all dict functions will only return the first value when multiple values for one key are found. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. A :class:`MultiDict` can be constructed from an iterable of ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2 onwards some keyword parameters. :param mapping: the initial value for the :class:`MultiDict`. Either a regular dict, an iterable of ``(key, value)`` tuples or `None`. """ def __init__(self, mapping=None): if isinstance(mapping, MultiDict): dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping))) elif isinstance(mapping, dict): tmp = {} for key, value in iteritems(mapping): if isinstance(value, (tuple, list)): if len(value) == 0: continue value = list(value) else: value = [value] tmp[key] = value dict.__init__(self, tmp) else: tmp = {} for key, value in mapping or (): tmp.setdefault(key, []).append(value) dict.__init__(self, tmp) def __getstate__(self): return dict(self.lists()) def __setstate__(self, value): dict.clear(self) dict.update(self, value) def __getitem__(self, key): """Return the first data value for this key; raises KeyError if not found. :param key: The key to be looked up. :raise KeyError: if the key does not exist. """ if key in self: lst = dict.__getitem__(self, key) if len(lst) > 0: return lst[0] raise exceptions.BadRequestKeyError(key) def __setitem__(self, key, value): """Like :meth:`add` but removes an existing key first. :param key: the key for the value. :param value: the value to set. """ dict.__setitem__(self, key, [value]) def add(self, key, value): """Adds a new value for the key. .. versionadded:: 0.6 :param key: the key for the value. :param value: the value to add. """ dict.setdefault(self, key, []).append(value) def getlist(self, key, type=None): """Return the list of items for a given key. If that key is not in the `MultiDict`, the return value will be an empty list. Just as `get` `getlist` accepts a `type` parameter. All items will be converted with the callable defined there. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`MultiDict`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key. """ try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return list(rv) result = [] for item in rv: try: result.append(type(item)) except ValueError: pass return result def setlist(self, key, new_list): """Remove the old values for a key and add new ones. Note that the list you pass the values in will be shallow-copied before it is inserted in the dictionary. >>> d = MultiDict() >>> d.setlist('foo', ['1', '2']) >>> d['foo'] '1' >>> d.getlist('foo') ['1', '2'] :param key: The key for which the values are set. :param new_list: An iterable with the new values for the key. Old values are removed first. """ dict.__setitem__(self, key, list(new_list)) def setdefault(self, key, default=None): """Returns the value for the key if it is in the dict, otherwise it returns `default` and sets that value for `key`. :param key: The key to be looked up. :param default: The default value to be returned if the key is not in the dict. If not further specified it's `None`. """ if key not in self: self[key] = default else: default = self[key] return default def setlistdefault(self, key, default_list=None): """Like `setdefault` but sets multiple values. The list returned is not a copy, but the list that is actually used internally. This means that you can put new values into the dict by appending items to the list: >>> d = MultiDict({"foo": 1}) >>> d.setlistdefault("foo").extend([2, 3]) >>> d.getlist("foo") [1, 2, 3] :param key: The key to be looked up. :param default: An iterable of default values. It is either copied (in case it was a list) or converted into a list before returned. :return: a :class:`list` """ if key not in self: default_list = list(default_list or ()) dict.__setitem__(self, key, default_list) else: default_list = dict.__getitem__(self, key) return default_list def items(self, multi=False): """Return an iterator of ``(key, value)`` pairs. :param multi: If set to `True` the iterator returned will have a pair for each value of each key. Otherwise it will only contain pairs for the first value of each key. """ for key, values in iteritems(dict, self): if multi: for value in values: yield key, value else: yield key, values[0] def lists(self): """Return a list of ``(key, values)`` pairs, where values is the list of all values associated with the key.""" for key, values in iteritems(dict, self): yield key, list(values) def keys(self): return iterkeys(dict, self) __iter__ = keys def values(self): """Returns an iterator of the first value on every key's value list.""" for values in itervalues(dict, self): yield values[0] def listvalues(self): """Return an iterator of all values associated with a key. Zipping :meth:`keys` and this is the same as calling :meth:`lists`: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> zip(d.keys(), d.listvalues()) == d.lists() True """ return itervalues(dict, self) def copy(self): """Return a shallow copy of this object.""" return self.__class__(self) def deepcopy(self, memo=None): """Return a deep copy of this object.""" return self.__class__(deepcopy(self.to_dict(flat=False), memo)) def to_dict(self, flat=True): """Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first value for each key. :return: a :class:`dict` """ if flat: return dict(iteritems(self)) return dict(self.lists()) def update(self, other_dict): """update() extends rather than replaces existing key lists: >>> a = MultiDict({'x': 1}) >>> b = MultiDict({'x': 2, 'y': 3}) >>> a.update(b) >>> a MultiDict([('y', 3), ('x', 1), ('x', 2)]) If the value list for a key in ``other_dict`` is empty, no new values will be added to the dict and the key will not be created: >>> x = {'empty_list': []} >>> y = MultiDict() >>> y.update(x) >>> y MultiDict([]) """ for key, value in iter_multi_items(other_dict): MultiDict.add(self, key, value) def pop(self, key, default=_missing): """Pop the first item for a list on the dict. Afterwards the key is removed from the dict, so additional values are discarded: >>> d = MultiDict({"foo": [1, 2, 3]}) >>> d.pop("foo") 1 >>> "foo" in d False :param key: the key to pop. :param default: if provided the value to return if the key was not in the dictionary. """ try: lst = dict.pop(self, key) if len(lst) == 0: raise exceptions.BadRequestKeyError() return lst[0] except KeyError as e: if default is not _missing: return default raise exceptions.BadRequestKeyError(str(e)) def popitem(self): """Pop an item from the dict.""" try: item = dict.popitem(self) if len(item[1]) == 0: raise exceptions.BadRequestKeyError() return (item[0], item[1][0]) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) def poplist(self, key): """Pop the list for a key from the dict. If the key is not in the dict an empty list is returned. .. versionchanged:: 0.5 If the key does no longer exist a list is returned instead of raising an error. """ return dict.pop(self, key, []) def popitemlist(self): """Pop a ``(key, list)`` tuple from the dict.""" try: return dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) def __copy__(self): return self.copy() def __deepcopy__(self, memo): return self.deepcopy(memo=memo) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True))) class _omd_bucket(object): """Wraps values in the :class:`OrderedMultiDict`. This makes it possible to keep an order over multiple different keys. It requires a lot of extra memory and slows down access a lot, but makes it possible to access elements in O(1) and iterate in O(n). """ __slots__ = ('prev', 'key', 'value', 'next') def __init__(self, omd, key, value): self.prev = omd._last_bucket self.key = key self.value = value self.next = None if omd._first_bucket is None: omd._first_bucket = self if omd._last_bucket is not None: omd._last_bucket.next = self omd._last_bucket = self def unlink(self, omd): if self.prev: self.prev.next = self.next if self.next: self.next.prev = self.prev if omd._first_bucket is self: omd._first_bucket = self.next if omd._last_bucket is self: omd._last_bucket = self.prev @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class OrderedMultiDict(MultiDict): """Works like a regular :class:`MultiDict` but preserves the order of the fields. To convert the ordered multi dict into a list you can use the :meth:`items` method and pass it ``multi=True``. In general an :class:`OrderedMultiDict` is an order of magnitude slower than a :class:`MultiDict`. .. admonition:: note Due to a limitation in Python you cannot convert an ordered multi dict into a regular dict by using ``dict(multidict)``. Instead you have to use the :meth:`to_dict` method, otherwise the internal bucket objects are exposed. """ def __init__(self, mapping=None): dict.__init__(self) self._first_bucket = self._last_bucket = None if mapping is not None: OrderedMultiDict.update(self, mapping) def __eq__(self, other): if not isinstance(other, MultiDict): return NotImplemented if isinstance(other, OrderedMultiDict): iter1 = iteritems(self, multi=True) iter2 = iteritems(other, multi=True) try: for k1, v1 in iter1: k2, v2 = next(iter2) if k1 != k2 or v1 != v2: return False except StopIteration: return False try: next(iter2) except StopIteration: return True return False if len(self) != len(other): return False for key, values in iterlists(self): if other.getlist(key) != values: return False return True __hash__ = None def __ne__(self, other): return not self.__eq__(other) def __reduce_ex__(self, protocol): return type(self), (list(iteritems(self, multi=True)),) def __getstate__(self): return list(iteritems(self, multi=True)) def __setstate__(self, values): dict.clear(self) for key, value in values: self.add(key, value) def __getitem__(self, key): if key in self: return dict.__getitem__(self, key)[0].value raise exceptions.BadRequestKeyError(key) def __setitem__(self, key, value): self.poplist(key) self.add(key, value) def __delitem__(self, key): self.pop(key) def keys(self): return (key for key, value in iteritems(self)) __iter__ = keys def values(self): return (value for key, value in iteritems(self)) def items(self, multi=False): ptr = self._first_bucket if multi: while ptr is not None: yield ptr.key, ptr.value ptr = ptr.next else: returned_keys = set() while ptr is not None: if ptr.key not in returned_keys: returned_keys.add(ptr.key) yield ptr.key, ptr.value ptr = ptr.next def lists(self): returned_keys = set() ptr = self._first_bucket while ptr is not None: if ptr.key not in returned_keys: yield ptr.key, self.getlist(ptr.key) returned_keys.add(ptr.key) ptr = ptr.next def listvalues(self): for key, values in iterlists(self): yield values def add(self, key, value): dict.setdefault(self, key, []).append(_omd_bucket(self, key, value)) def getlist(self, key, type=None): try: rv = dict.__getitem__(self, key) except KeyError: return [] if type is None: return [x.value for x in rv] result = [] for item in rv: try: result.append(type(item.value)) except ValueError: pass return result def setlist(self, key, new_list): self.poplist(key) for value in new_list: self.add(key, value) def setlistdefault(self, key, default_list=None): raise TypeError('setlistdefault is unsupported for ' 'ordered multi dicts') def update(self, mapping): for key, value in iter_multi_items(mapping): OrderedMultiDict.add(self, key, value) def poplist(self, key): buckets = dict.pop(self, key, ()) for bucket in buckets: bucket.unlink(self) return [x.value for x in buckets] def pop(self, key, default=_missing): try: buckets = dict.pop(self, key) except KeyError as e: if default is not _missing: return default raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return buckets[0].value def popitem(self): try: key, buckets = dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return key, buckets[0].value def popitemlist(self): try: key, buckets = dict.popitem(self) except KeyError as e: raise exceptions.BadRequestKeyError(str(e)) for bucket in buckets: bucket.unlink(self) return key, [x.value for x in buckets] def _options_header_vkw(value, kw): return dump_options_header(value, dict((k.replace('_', '-'), v) for k, v in kw.items())) def _unicodify_header_value(value): if isinstance(value, bytes): value = value.decode('latin-1') if not isinstance(value, text_type): value = text_type(value) return value @native_itermethods(['keys', 'values', 'items']) class Headers(object): """An object that stores some headers. It has a dict-like interface but is ordered and can store the same keys multiple times. This data structure is useful if you want a nicer way to handle WSGI headers which are stored as tuples in a list. From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is also a subclass of the :class:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers` class, with the exception of `__getitem__`. :mod:`wsgiref` will return `None` for ``headers['missing']``, whereas :class:`Headers` will raise a :class:`KeyError`. To create a new :class:`Headers` object pass it a list or dict of headers which are used as default values. This does not reuse the list passed to the constructor for internal usage. :param defaults: The list of default values for the :class:`Headers`. .. versionchanged:: 0.9 This data structure now stores unicode values similar to how the multi dicts do it. The main difference is that bytes can be set as well which will automatically be latin1 decoded. .. versionchanged:: 0.9 The :meth:`linked` function was removed without replacement as it was an API that does not support the changes to the encoding model. """ def __init__(self, defaults=None): self._list = [] if defaults is not None: if isinstance(defaults, (list, Headers)): self._list.extend(defaults) else: self.extend(defaults) def __getitem__(self, key, _get_mode=False): if not _get_mode: if isinstance(key, integer_types): return self._list[key] elif isinstance(key, slice): return self.__class__(self._list[key]) if not isinstance(key, string_types): raise exceptions.BadRequestKeyError(key) ikey = key.lower() for k, v in self._list: if k.lower() == ikey: return v # micro optimization: if we are in get mode we will catch that # exception one stack level down so we can raise a standard # key error instead of our special one. if _get_mode: raise KeyError() raise exceptions.BadRequestKeyError(key) def __eq__(self, other): return other.__class__ is self.__class__ and \ set(other._list) == set(self._list) __hash__ = None def __ne__(self, other): return not self.__eq__(other) def get(self, key, default=None, type=None, as_bytes=False): """Return the default value if the requested data doesn't exist. If `type` is provided and is a callable it should convert the value, return it or raise a :exc:`ValueError` if that is not possible. In this case the function will return the default as if the value was not found: >>> d = Headers([('Content-Length', '42')]) >>> d.get('Content-Length', type=int) 42 If a headers object is bound you must not add unicode strings because no encoding takes place. .. versionadded:: 0.9 Added support for `as_bytes`. :param key: The key to be looked up. :param default: The default value to be returned if the key can't be looked up. If not further specified `None` is returned. :param type: A callable that is used to cast the value in the :class:`Headers`. If a :exc:`ValueError` is raised by this callable the default value is returned. :param as_bytes: return bytes instead of unicode strings. """ try: rv = self.__getitem__(key, _get_mode=True) except KeyError: return default if as_bytes: rv = rv.encode('latin1') if type is None: return rv try: return type(rv) except ValueError: return default def getlist(self, key, type=None, as_bytes=False): """Return the list of items for a given key. If that key is not in the :class:`Headers`, the return value will be an empty list. Just as :meth:`get` :meth:`getlist` accepts a `type` parameter. All items will be converted with the callable defined there. .. versionadded:: 0.9 Added support for `as_bytes`. :param key: The key to be looked up. :param type: A callable that is used to cast the value in the :class:`Headers`. If a :exc:`ValueError` is raised by this callable the value will be removed from the list. :return: a :class:`list` of all the values for the key. :param as_bytes: return bytes instead of unicode strings. """ ikey = key.lower() result = [] for k, v in self: if k.lower() == ikey: if as_bytes: v = v.encode('latin1') if type is not None: try: v = type(v) except ValueError: continue result.append(v) return result def get_all(self, name): """Return a list of all the values for the named field. This method is compatible with the :mod:`wsgiref` :meth:`~wsgiref.headers.Headers.get_all` method. """ return self.getlist(name) def items(self, lower=False): for key, value in self: if lower: key = key.lower() yield key, value def keys(self, lower=False): for key, _ in iteritems(self, lower): yield key def values(self): for _, value in iteritems(self): yield value def extend(self, iterable): """Extend the headers with a dict or an iterable yielding keys and values. """ if isinstance(iterable, dict): for key, value in iteritems(iterable): if isinstance(value, (tuple, list)): for v in value: self.add(key, v) else: self.add(key, value) else: for key, value in iterable: self.add(key, value) def __delitem__(self, key, _index_operation=True): if _index_operation and isinstance(key, (integer_types, slice)): del self._list[key] return key = key.lower() new = [] for k, v in self._list: if k.lower() != key: new.append((k, v)) self._list[:] = new def remove(self, key): """Remove a key. :param key: The key to be removed. """ return self.__delitem__(key, _index_operation=False) def pop(self, key=None, default=_missing): """Removes and returns a key or index. :param key: The key to be popped. If this is an integer the item at that position is removed, if it's a string the value for that key is. If the key is omitted or `None` the last item is removed. :return: an item. """ if key is None: return self._list.pop() if isinstance(key, integer_types): return self._list.pop(key) try: rv = self[key] self.remove(key) except KeyError: if default is not _missing: return default raise return rv def popitem(self): """Removes a key or index and returns a (key, value) item.""" return self.pop() def __contains__(self, key): """Check if a key is present.""" try: self.__getitem__(key, _get_mode=True) except KeyError: return False return True has_key = __contains__ def __iter__(self): """Yield ``(key, value)`` tuples.""" return iter(self._list) def __len__(self): return len(self._list) def add(self, _key, _value, **kw): """Add a new header tuple to the list. Keyword arguments can specify additional parameters for the header value, with underscores converted to dashes:: >>> d = Headers() >>> d.add('Content-Type', 'text/plain') >>> d.add('Content-Disposition', 'attachment', filename='foo.png') The keyword argument dumping uses :func:`dump_options_header` behind the scenes. .. versionadded:: 0.4.1 keyword arguments were added for :mod:`wsgiref` compatibility. """ if kw: _value = _options_header_vkw(_value, kw) _value = _unicodify_header_value(_value) self._validate_value(_value) self._list.append((_key, _value)) def _validate_value(self, value): if not isinstance(value, text_type): raise TypeError('Value should be unicode.') if u'\n' in value or u'\r' in value: raise ValueError('Detected newline in header value. This is ' 'a potential security problem') def add_header(self, _key, _value, **_kw): """Add a new header tuple to the list. An alias for :meth:`add` for compatibility with the :mod:`wsgiref` :meth:`~wsgiref.headers.Headers.add_header` method. """ self.add(_key, _value, **_kw) def clear(self): """Clears all headers.""" del self._list[:] def set(self, _key, _value, **kw): """Remove all header tuples for `key` and add a new one. The newly added key either appears at the end of the list if there was no entry or replaces the first one. Keyword arguments can specify additional parameters for the header value, with underscores converted to dashes. See :meth:`add` for more information. .. versionchanged:: 0.6.1 :meth:`set` now accepts the same arguments as :meth:`add`. :param key: The key to be inserted. :param value: The value to be inserted. """ if kw: _value = _options_header_vkw(_value, kw) _value = _unicodify_header_value(_value) self._validate_value(_value) if not self._list: self._list.append((_key, _value)) return listiter = iter(self._list) ikey = _key.lower() for idx, (old_key, old_value) in enumerate(listiter): if old_key.lower() == ikey: # replace first ocurrence self._list[idx] = (_key, _value) break else: self._list.append((_key, _value)) return self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey] def setdefault(self, key, value): """Returns the value for the key if it is in the dict, otherwise it returns `default` and sets that value for `key`. :param key: The key to be looked up. :param default: The default value to be returned if the key is not in the dict. If not further specified it's `None`. """ if key in self: return self[key] self.set(key, value) return value def __setitem__(self, key, value): """Like :meth:`set` but also supports index/slice based setting.""" if isinstance(key, (slice, integer_types)): if isinstance(key, integer_types): value = [value] value = [(k, _unicodify_header_value(v)) for (k, v) in value] [self._validate_value(v) for (k, v) in value] if isinstance(key, integer_types): self._list[key] = value[0] else: self._list[key] = value else: self.set(key, value) def to_list(self, charset='iso-8859-1'): """Convert the headers into a list suitable for WSGI.""" from warnings import warn warn(DeprecationWarning('Method removed, use to_wsgi_list instead'), stacklevel=2) return self.to_wsgi_list() def to_wsgi_list(self): """Convert the headers into a list suitable for WSGI. The values are byte strings in Python 2 converted to latin1 and unicode strings in Python 3 for the WSGI server to encode. :return: list """ if PY2: return [(to_native(k), v.encode('latin1')) for k, v in self] return list(self) def copy(self): return self.__class__(self._list) def __copy__(self): return self.copy() def __str__(self): """Returns formatted headers suitable for HTTP transmission.""" strs = [] for key, value in self.to_wsgi_list(): strs.append('%s: %s' % (key, value)) strs.append('\r\n') return '\r\n'.join(strs) def __repr__(self): return '%s(%r)' % ( self.__class__.__name__, list(self) ) class ImmutableHeadersMixin(object): """Makes a :class:`Headers` immutable. We do not mark them as hashable though since the only usecase for this datastructure in Werkzeug is a view on a mutable structure. .. versionadded:: 0.5 :private: """ def __delitem__(self, key): is_immutable(self) def __setitem__(self, key, value): is_immutable(self) set = __setitem__ def add(self, item): is_immutable(self) remove = add_header = add def extend(self, iterable): is_immutable(self) def insert(self, pos, value): is_immutable(self) def pop(self, index=-1): is_immutable(self) def popitem(self): is_immutable(self) def setdefault(self, key, default): is_immutable(self) class EnvironHeaders(ImmutableHeadersMixin, Headers): """Read only version of the headers from a WSGI environment. This provides the same interface as `Headers` and is constructed from a WSGI environment. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. """ def __init__(self, environ): self.environ = environ def __eq__(self, other): return self.environ is other.environ __hash__ = None def __getitem__(self, key, _get_mode=False): # _get_mode is a no-op for this class as there is no index but # used because get() calls it. key = key.upper().replace('-', '_') if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): return _unicodify_header_value(self.environ[key]) return _unicodify_header_value(self.environ['HTTP_' + key]) def __len__(self): # the iter is necessary because otherwise list calls our # len which would call list again and so forth. return len(list(iter(self))) def __iter__(self): for key, value in iteritems(self.environ): if key.startswith('HTTP_') and key not in \ ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'): yield (key[5:].replace('_', '-').title(), _unicodify_header_value(value)) elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'): yield (key.replace('_', '-').title(), _unicodify_header_value(value)) def copy(self): raise TypeError('cannot create %r copies' % self.__class__.__name__) @native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues']) class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict): """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict` instances as sequence and it will combine the return values of all wrapped dicts: >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict >>> post = MultiDict([('foo', 'bar')]) >>> get = MultiDict([('blub', 'blah')]) >>> combined = CombinedMultiDict([get, post]) >>> combined['foo'] 'bar' >>> combined['blub'] 'blah' This works for all read operations and will raise a `TypeError` for methods that usually change data which isn't possible. From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP exceptions. """ def __reduce_ex__(self, protocol): return type(self), (self.dicts,) def __init__(self, dicts=None): self.dicts = dicts or [] @classmethod def fromkeys(cls): raise TypeError('cannot create %r instances by fromkeys' % cls.__name__) def __getitem__(self, key): for d in self.dicts: if key in d: return d[key] raise exceptions.BadRequestKeyError(key) def get(self, key, default=None, type=None): for d in self.dicts: if key in d: if type is not None: try: return type(d[key]) except ValueError: continue return d[key] return default def getlist(self, key, type=None): rv = [] for d in self.dicts: rv.extend(d.getlist(key, type)) return rv def _keys_impl(self): """This function exists so __len__ can be implemented more efficiently, saving one list creation from an iterator. Using this for Python 2's ``dict.keys`` behavior would be useless since `dict.keys` in Python 2 returns a list, while we have a set here. """ rv = set() for d in self.dicts: rv.update(iterkeys(d)) return rv def keys(self): return iter(self._keys_impl()) __iter__ = keys def items(self, multi=False): found = set() for d in self.dicts: for key, value in iteritems(d, multi): if multi: yield key, value elif key not in found: found.add(key) yield key, value def values(self): for key, value in iteritems(self): yield value def lists(self): rv = {} for d in self.dicts: for key, values in iterlists(d): rv.setdefault(key, []).extend(values) return iteritems(rv) def listvalues(self): return (x[1] for x in self.lists()) def copy(self): """Return a shallow copy of this object.""" return self.__class__(self.dicts[:]) def to_dict(self, flat=True): """Return the contents as regular dict. If `flat` is `True` the returned dict will only have the first item present, if `flat` is `False` all values will be returned as lists. :param flat: If set to `False` the dict returned will have lists with all the values in it. Otherwise it will only contain the first item for each key. :return: a :class:`dict` """ rv = {} for d in reversed(self.dicts): rv.update(d.to_dict(flat)) return rv def __len__(self): return len(self._keys_impl()) def __contains__(self, key): for d in self.dicts: if key in d: return True return False has_key = __contains__ def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self.dicts) class FileMultiDict(MultiDict): """A special :class:`MultiDict` that has convenience methods to add files to it. This is used for :class:`EnvironBuilder` and generally useful for unittesting. .. versionadded:: 0.5 """ def add_file(self, name, file, filename=None, content_type=None): """Adds a new file to the dict. `file` can be a file name or a :class:`file`-like or a :class:`FileStorage` object. :param name: the name of the field. :param file: a filename or :class:`file`-like object :param filename: an optional filename :param content_type: an optional content type """ if isinstance(file, FileStorage): value = file else: if isinstance(file, string_types): if filename is None: filename = file file = open(file, 'rb') if filename and content_type is None: content_type = mimetypes.guess_type(filename)[0] or \ 'application/octet-stream' value = FileStorage(file, filename, name, content_type) self.add(name, value) class ImmutableDict(ImmutableDictMixin, dict): """An immutable :class:`dict`. .. versionadded:: 0.5 """ def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, dict.__repr__(self), ) def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return dict(self) def __copy__(self): return self class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict): """An immutable :class:`MultiDict`. .. versionadded:: 0.5 """ def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return MultiDict(self) def __copy__(self): return self class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict): """An immutable :class:`OrderedMultiDict`. .. versionadded:: 0.6 """ def _iter_hashitems(self): return enumerate(iteritems(self, multi=True)) def copy(self): """Return a shallow mutable copy of this object. Keep in mind that the standard library's :func:`copy` function is a no-op for this class like for any other python immutable type (eg: :class:`tuple`). """ return OrderedMultiDict(self) def __copy__(self): return self @native_itermethods(['values']) class Accept(ImmutableList): """An :class:`Accept` object is just a list subclass for lists of ``(value, quality)`` tuples. It is automatically sorted by quality. All :class:`Accept` objects work similar to a list but provide extra functionality for working with the data. Containment checks are normalized to the rules of that header: >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)]) >>> a.best 'ISO-8859-1' >>> 'iso-8859-1' in a True >>> 'UTF8' in a True >>> 'utf7' in a False To get the quality for an item you can use normal item lookup: >>> print a['utf-8'] 0.7 >>> a['utf7'] 0 .. versionchanged:: 0.5 :class:`Accept` objects are forced immutable now. """ def __init__(self, values=()): if values is None: list.__init__(self) self.provided = False elif isinstance(values, Accept): self.provided = values.provided list.__init__(self, values) else: self.provided = True values = sorted(values, key=lambda x: (x[1], x[0]), reverse=True) list.__init__(self, values) def _value_matches(self, value, item): """Check if a value matches a given accept item.""" return item == '*' or item.lower() == value.lower() def __getitem__(self, key): """Besides index lookup (getting item n) you can also pass it a string to get the quality for the item. If the item is not in the list, the returned quality is ``0``. """ if isinstance(key, string_types): return self.quality(key) return list.__getitem__(self, key) def quality(self, key): """Returns the quality of the key. .. versionadded:: 0.6 In previous versions you had to use the item-lookup syntax (eg: ``obj[key]`` instead of ``obj.quality(key)``) """ for item, quality in self: if self._value_matches(key, item): return quality return 0 def __contains__(self, value): for item, quality in self: if self._value_matches(value, item): return True return False def __repr__(self): return '%s([%s])' % ( self.__class__.__name__, ', '.join('(%r, %s)' % (x, y) for x, y in self) ) def index(self, key): """Get the position of an entry or raise :exc:`ValueError`. :param key: The key to be looked up. .. versionchanged:: 0.5 This used to raise :exc:`IndexError`, which was inconsistent with the list API. """ if isinstance(key, string_types): for idx, (item, quality) in enumerate(self): if self._value_matches(key, item): return idx raise ValueError(key) return list.index(self, key) def find(self, key): """Get the position of an entry or return -1. :param key: The key to be looked up. """ try: return self.index(key) except ValueError: return -1 def values(self): """Iterate over all values.""" for item in self: yield item[0] def to_header(self): """Convert the header set into an HTTP header string.""" result = [] for value, quality in self: if quality != 1: value = '%s;q=%s' % (value, quality) result.append(value) return ','.join(result) def __str__(self): return self.to_header() def best_match(self, matches, default=None): """Returns the best match from a list of possible matches based on the quality of the client. If two items have the same quality, the one is returned that comes first. :param matches: a list of matches to check for :param default: the value that is returned if none match """ best_quality = -1 result = default for server_item in matches: for client_item, quality in self: if quality <= best_quality: break if self._value_matches(server_item, client_item) \ and quality > 0: best_quality = quality result = server_item return result @property def best(self): """The best match as value.""" if self: return self[0][0] class MIMEAccept(Accept): """Like :class:`Accept` but with special methods and behavior for mimetypes. """ def _value_matches(self, value, item): def _normalize(x): x = x.lower() return x == '*' and ('*', '*') or x.split('/', 1) # this is from the application which is trusted. to avoid developer # frustration we actually check these for valid values if '/' not in value: raise ValueError('invalid mimetype %r' % value) value_type, value_subtype = _normalize(value) if value_type == '*' and value_subtype != '*': raise ValueError('invalid mimetype %r' % value) if '/' not in item: return False item_type, item_subtype = _normalize(item) if item_type == '*' and item_subtype != '*': return False return ( (item_type == item_subtype == '*' or value_type == value_subtype == '*') or (item_type == value_type and (item_subtype == '*' or value_subtype == '*' or item_subtype == value_subtype)) ) @property def accept_html(self): """True if this object accepts HTML.""" return ( 'text/html' in self or 'application/xhtml+xml' in self or self.accept_xhtml ) @property def accept_xhtml(self): """True if this object accepts XHTML.""" return ( 'application/xhtml+xml' in self or 'application/xml' in self ) @property def accept_json(self): """True if this object accepts JSON.""" return 'application/json' in self class LanguageAccept(Accept): """Like :class:`Accept` but with normalization for languages.""" def _value_matches(self, value, item): def _normalize(language): return _locale_delim_re.split(language.lower()) return item == '*' or _normalize(value) == _normalize(item) class CharsetAccept(Accept): """Like :class:`Accept` but with normalization for charsets.""" def _value_matches(self, value, item): def _normalize(name): try: return codecs.lookup(name).name except LookupError: return name.lower() return item == '*' or _normalize(value) == _normalize(item) def cache_property(key, empty, type): """Return a new property object for a cache header. Useful if you want to add support for a cache extension in a subclass.""" return property(lambda x: x._get_cache_value(key, empty, type), lambda x, v: x._set_cache_value(key, v, type), lambda x: x._del_cache_value(key), 'accessor for %r' % key) class _CacheControl(UpdateDictMixin, dict): """Subclass of a dict that stores values for a Cache-Control header. It has accessors for all the cache-control directives specified in RFC 2616. The class does not differentiate between request and response directives. Because the cache-control directives in the HTTP header use dashes the python descriptors use underscores for that. To get a header of the :class:`CacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionchanged:: 0.4 Setting `no_cache` or `private` to boolean `True` will set the implicit none-value which is ``*``: >>> cc = ResponseCacheControl() >>> cc.no_cache = True >>> cc <ResponseCacheControl 'no-cache'> >>> cc.no_cache '*' >>> cc.no_cache = None >>> cc <ResponseCacheControl ''> In versions before 0.5 the behavior documented here affected the now no longer existing `CacheControl` class. """ no_cache = cache_property('no-cache', '*', None) no_store = cache_property('no-store', None, bool) max_age = cache_property('max-age', -1, int) no_transform = cache_property('no-transform', None, None) def __init__(self, values=(), on_update=None): dict.__init__(self, values or ()) self.on_update = on_update self.provided = values is not None def _get_cache_value(self, key, empty, type): """Used internally by the accessor properties.""" if type is bool: return key in self if key in self: value = self[key] if value is None: return empty elif type is not None: try: value = type(value) except ValueError: pass return value def _set_cache_value(self, key, value, type): """Used internally by the accessor properties.""" if type is bool: if value: self[key] = None else: self.pop(key, None) else: if value is None: self.pop(key) elif value is True: self[key] = None else: self[key] = value def _del_cache_value(self, key): """Used internally by the accessor properties.""" if key in self: del self[key] def to_header(self): """Convert the stored values into a cache control header.""" return dump_header(self) def __str__(self): return self.to_header() def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, " ".join( "%s=%r" % (k, v) for k, v in sorted(self.items()) ), ) class RequestCacheControl(ImmutableDictMixin, _CacheControl): """A cache control for requests. This is immutable and gives access to all the request-relevant cache control headers. To get a header of the :class:`RequestCacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionadded:: 0.5 In previous versions a `CacheControl` class existed that was used both for request and response. """ max_stale = cache_property('max-stale', '*', int) min_fresh = cache_property('min-fresh', '*', int) no_transform = cache_property('no-transform', None, None) only_if_cached = cache_property('only-if-cached', None, bool) class ResponseCacheControl(_CacheControl): """A cache control for responses. Unlike :class:`RequestCacheControl` this is mutable and gives access to response-relevant cache control headers. To get a header of the :class:`ResponseCacheControl` object again you can convert the object into a string or call the :meth:`to_header` method. If you plan to subclass it and add your own items have a look at the sourcecode for that class. .. versionadded:: 0.5 In previous versions a `CacheControl` class existed that was used both for request and response. """ public = cache_property('public', None, bool) private = cache_property('private', '*', None) must_revalidate = cache_property('must-revalidate', None, bool) proxy_revalidate = cache_property('proxy-revalidate', None, bool) s_maxage = cache_property('s-maxage', None, None) # attach cache_property to the _CacheControl as staticmethod # so that others can reuse it. _CacheControl.cache_property = staticmethod(cache_property) class CallbackDict(UpdateDictMixin, dict): """A dict that calls a function passed every time something is changed. The function is passed the dict instance. """ def __init__(self, initial=None, on_update=None): dict.__init__(self, initial or ()) self.on_update = on_update def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, dict.__repr__(self) ) class HeaderSet(MutableSet): """Similar to the :class:`ETags` class this implements a set-like structure. Unlike :class:`ETags` this is case insensitive and used for vary, allow, and content-language headers. If not constructed using the :func:`parse_set_header` function the instantiation works like this: >>> hs = HeaderSet(['foo', 'bar', 'baz']) >>> hs HeaderSet(['foo', 'bar', 'baz']) """ def __init__(self, headers=None, on_update=None): self._headers = list(headers or ()) self._set = set([x.lower() for x in self._headers]) self.on_update = on_update def add(self, header): """Add a new header to the set.""" self.update((header,)) def remove(self, header): """Remove a header from the set. This raises an :exc:`KeyError` if the header is not in the set. .. versionchanged:: 0.5 In older versions a :exc:`IndexError` was raised instead of a :exc:`KeyError` if the object was missing. :param header: the header to be removed. """ key = header.lower() if key not in self._set: raise KeyError(header) self._set.remove(key) for idx, key in enumerate(self._headers): if key.lower() == header: del self._headers[idx] break if self.on_update is not None: self.on_update(self) def update(self, iterable): """Add all the headers from the iterable to the set. :param iterable: updates the set with the items from the iterable. """ inserted_any = False for header in iterable: key = header.lower() if key not in self._set: self._headers.append(header) self._set.add(key) inserted_any = True if inserted_any and self.on_update is not None: self.on_update(self) def discard(self, header): """Like :meth:`remove` but ignores errors. :param header: the header to be discarded. """ try: return self.remove(header) except KeyError: pass def find(self, header): """Return the index of the header in the set or return -1 if not found. :param header: the header to be looked up. """ header = header.lower() for idx, item in enumerate(self._headers): if item.lower() == header: return idx return -1 def index(self, header): """Return the index of the header in the set or raise an :exc:`IndexError`. :param header: the header to be looked up. """ rv = self.find(header) if rv < 0: raise IndexError(header) return rv def clear(self): """Clear the set.""" self._set.clear() del self._headers[:] if self.on_update is not None: self.on_update(self) def as_set(self, preserve_casing=False): """Return the set as real python set type. When calling this, all the items are converted to lowercase and the ordering is lost. :param preserve_casing: if set to `True` the items in the set returned will have the original case like in the :class:`HeaderSet`, otherwise they will be lowercase. """ if preserve_casing: return set(self._headers) return set(self._set) def to_header(self): """Convert the header set into an HTTP header string.""" return ', '.join(map(quote_header_value, self._headers)) def __getitem__(self, idx): return self._headers[idx] def __delitem__(self, idx): rv = self._headers.pop(idx) self._set.remove(rv.lower()) if self.on_update is not None: self.on_update(self) def __setitem__(self, idx, value): old = self._headers[idx] self._set.remove(old.lower()) self._headers[idx] = value self._set.add(value.lower()) if self.on_update is not None: self.on_update(self) def __contains__(self, header): return header.lower() in self._set def __len__(self): return len(self._set) def __iter__(self): return iter(self._headers) def __nonzero__(self): return bool(self._set) def __str__(self): return self.to_header() def __repr__(self): return '%s(%r)' % ( self.__class__.__name__, self._headers ) class ETags(Container, Iterable): """A set that can be used to check if one etag is present in a collection of etags. """ def __init__(self, strong_etags=None, weak_etags=None, star_tag=False): self._strong = frozenset(not star_tag and strong_etags or ()) self._weak = frozenset(weak_etags or ()) self.star_tag = star_tag def as_set(self, include_weak=False): """Convert the `ETags` object into a python set. Per default all the weak etags are not part of this set.""" rv = set(self._strong) if include_weak: rv.update(self._weak) return rv def is_weak(self, etag): """Check if an etag is weak.""" return etag in self._weak def contains_weak(self, etag): """Check if an etag is part of the set including weak and strong tags.""" return self.is_weak(etag) or self.contains(etag) def contains(self, etag): """Check if an etag is part of the set ignoring weak tags. It is also possible to use the ``in`` operator. """ if self.star_tag: return True return etag in self._strong def contains_raw(self, etag): """When passed a quoted tag it will check if this tag is part of the set. If the tag is weak it is checked against weak and strong tags, otherwise strong only.""" etag, weak = unquote_etag(etag) if weak: return self.contains_weak(etag) return self.contains(etag) def to_header(self): """Convert the etags set into a HTTP header string.""" if self.star_tag: return '*' return ', '.join( ['"%s"' % x for x in self._strong] + ['W/"%s"' % x for x in self._weak] ) def __call__(self, etag=None, data=None, include_weak=False): if [etag, data].count(None) != 1: raise TypeError('either tag or data required, but at least one') if etag is None: etag = generate_etag(data) if include_weak: if etag in self._weak: return True return etag in self._strong def __bool__(self): return bool(self.star_tag or self._strong or self._weak) __nonzero__ = __bool__ def __str__(self): return self.to_header() def __iter__(self): return iter(self._strong) def __contains__(self, etag): return self.contains(etag) def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class IfRange(object): """Very simple object that represents the `If-Range` header in parsed form. It will either have neither a etag or date or one of either but never both. .. versionadded:: 0.7 """ def __init__(self, etag=None, date=None): #: The etag parsed and unquoted. Ranges always operate on strong #: etags so the weakness information is not necessary. self.etag = etag #: The date in parsed format or `None`. self.date = date def to_header(self): """Converts the object back into an HTTP header.""" if self.date is not None: return http_date(self.date) if self.etag is not None: return quote_etag(self.etag) return '' def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class Range(object): """Represents a range header. All the methods are only supporting bytes as unit. It does store multiple ranges but :meth:`range_for_length` will only work if only one range is provided. .. versionadded:: 0.7 """ def __init__(self, units, ranges): #: The units of this range. Usually "bytes". self.units = units #: A list of ``(begin, end)`` tuples for the range header provided. #: The ranges are non-inclusive. self.ranges = ranges def range_for_length(self, length): """If the range is for bytes, the length is not None and there is exactly one range and it is satisfiable it returns a ``(start, stop)`` tuple, otherwise `None`. """ if self.units != 'bytes' or length is None or len(self.ranges) != 1: return None start, end = self.ranges[0] if end is None: end = length if start < 0: start += length if is_byte_range_valid(start, end, length): return start, min(end, length) def make_content_range(self, length): """Creates a :class:`~werkzeug.datastructures.ContentRange` object from the current range and given content length. """ rng = self.range_for_length(length) if rng is not None: return ContentRange(self.units, rng[0], rng[1], length) def to_header(self): """Converts the object back into an HTTP header.""" ranges = [] for begin, end in self.ranges: if end is None: ranges.append(begin >= 0 and '%s-' % begin or str(begin)) else: ranges.append('%s-%s' % (begin, end - 1)) return '%s=%s' % (self.units, ','.join(ranges)) def to_content_range_header(self, length): """Converts the object into `Content-Range` HTTP header, based on given length """ range_for_length = self.range_for_length(length) if range_for_length is not None: return '%s %d-%d/%d' % (self.units, range_for_length[0], range_for_length[1] - 1, length) return None def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class ContentRange(object): """Represents the content range header. .. versionadded:: 0.7 """ def __init__(self, units, start, stop, length=None, on_update=None): assert is_byte_range_valid(start, stop, length), \ 'Bad range provided' self.on_update = on_update self.set(start, stop, length, units) def _callback_property(name): def fget(self): return getattr(self, name) def fset(self, value): setattr(self, name, value) if self.on_update is not None: self.on_update(self) return property(fget, fset) #: The units to use, usually "bytes" units = _callback_property('_units') #: The start point of the range or `None`. start = _callback_property('_start') #: The stop point of the range (non-inclusive) or `None`. Can only be #: `None` if also start is `None`. stop = _callback_property('_stop') #: The length of the range or `None`. length = _callback_property('_length') def set(self, start, stop, length=None, units='bytes'): """Simple method to update the ranges.""" assert is_byte_range_valid(start, stop, length), \ 'Bad range provided' self._units = units self._start = start self._stop = stop self._length = length if self.on_update is not None: self.on_update(self) def unset(self): """Sets the units to `None` which indicates that the header should no longer be used. """ self.set(None, None, units=None) def to_header(self): if self.units is None: return '' if self.length is None: length = '*' else: length = self.length if self.start is None: return '%s */%s' % (self.units, length) return '%s %s-%s/%s' % ( self.units, self.start, self.stop - 1, length ) def __nonzero__(self): return self.units is not None __bool__ = __nonzero__ def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % (self.__class__.__name__, str(self)) class Authorization(ImmutableDictMixin, dict): """Represents an `Authorization` header sent by the client. You should not create this kind of object yourself but use it when it's returned by the `parse_authorization_header` function. This object is a dict subclass and can be altered by setting dict items but it should be considered immutable as it's returned by the client and not meant for modifications. .. versionchanged:: 0.5 This object became immutable. """ def __init__(self, auth_type, data=None): dict.__init__(self, data or {}) self.type = auth_type username = property(lambda x: x.get('username'), doc=''' The username transmitted. This is set for both basic and digest auth all the time.''') password = property(lambda x: x.get('password'), doc=''' When the authentication type is basic this is the password transmitted by the client, else `None`.''') realm = property(lambda x: x.get('realm'), doc=''' This is the server realm sent back for HTTP digest auth.''') nonce = property(lambda x: x.get('nonce'), doc=''' The nonce the server sent for digest auth, sent back by the client. A nonce should be unique for every 401 response for HTTP digest auth.''') uri = property(lambda x: x.get('uri'), doc=''' The URI from Request-URI of the Request-Line; duplicated because proxies are allowed to change the Request-Line in transit. HTTP digest auth only.''') nc = property(lambda x: x.get('nc'), doc=''' The nonce count value transmitted by clients if a qop-header is also transmitted. HTTP digest auth only.''') cnonce = property(lambda x: x.get('cnonce'), doc=''' If the server sent a qop-header in the ``WWW-Authenticate`` header, the client has to provide this value for HTTP digest auth. See the RFC for more details.''') response = property(lambda x: x.get('response'), doc=''' A string of 32 hex digits computed as defined in RFC 2617, which proves that the user knows a password. Digest auth only.''') opaque = property(lambda x: x.get('opaque'), doc=''' The opaque header from the server returned unchanged by the client. It is recommended that this string be base64 or hexadecimal data. Digest auth only.''') @property def qop(self): """Indicates what "quality of protection" the client has applied to the message for HTTP digest auth.""" def on_update(header_set): if not header_set and 'qop' in self: del self['qop'] elif header_set: self['qop'] = header_set.to_header() return parse_set_header(self.get('qop'), on_update) class WWWAuthenticate(UpdateDictMixin, dict): """Provides simple access to `WWW-Authenticate` headers.""" #: list of keys that require quoting in the generated header _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm', 'qop']) def __init__(self, auth_type=None, values=None, on_update=None): dict.__init__(self, values or ()) if auth_type: self['__auth_type__'] = auth_type self.on_update = on_update def set_basic(self, realm='authentication required'): """Clear the auth info and enable basic auth.""" dict.clear(self) dict.update(self, {'__auth_type__': 'basic', 'realm': realm}) if self.on_update: self.on_update(self) def set_digest(self, realm, nonce, qop=('auth',), opaque=None, algorithm=None, stale=False): """Clear the auth info and enable digest auth.""" d = { '__auth_type__': 'digest', 'realm': realm, 'nonce': nonce, 'qop': dump_header(qop) } if stale: d['stale'] = 'TRUE' if opaque is not None: d['opaque'] = opaque if algorithm is not None: d['algorithm'] = algorithm dict.clear(self) dict.update(self, d) if self.on_update: self.on_update(self) def to_header(self): """Convert the stored values into a WWW-Authenticate header.""" d = dict(self) auth_type = d.pop('__auth_type__', None) or 'basic' return '%s %s' % (auth_type.title(), ', '.join([ '%s=%s' % (key, quote_header_value(value, allow_token=key not in self._require_quoting)) for key, value in iteritems(d) ])) def __str__(self): return self.to_header() def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.to_header() ) def auth_property(name, doc=None): """A static helper function for subclasses to add extra authentication system properties onto a class:: class FooAuthenticate(WWWAuthenticate): special_realm = auth_property('special_realm') For more information have a look at the sourcecode to see how the regular properties (:attr:`realm` etc.) are implemented. """ def _set_value(self, value): if value is None: self.pop(name, None) else: self[name] = str(value) return property(lambda x: x.get(name), _set_value, doc=doc) def _set_property(name, doc=None): def fget(self): def on_update(header_set): if not header_set and name in self: del self[name] elif header_set: self[name] = header_set.to_header() return parse_set_header(self.get(name), on_update) return property(fget, doc=doc) type = auth_property('__auth_type__', doc=''' The type of the auth mechanism. HTTP currently specifies `Basic` and `Digest`.''') realm = auth_property('realm', doc=''' A string to be displayed to users so they know which username and password to use. This string should contain at least the name of the host performing the authentication and might additionally indicate the collection of users who might have access.''') domain = _set_property('domain', doc=''' A list of URIs that define the protection space. If a URI is an absolute path, it is relative to the canonical root URL of the server being accessed.''') nonce = auth_property('nonce', doc=''' A server-specified data string which should be uniquely generated each time a 401 response is made. It is recommended that this string be base64 or hexadecimal data.''') opaque = auth_property('opaque', doc=''' A string of data, specified by the server, which should be returned by the client unchanged in the Authorization header of subsequent requests with URIs in the same protection space. It is recommended that this string be base64 or hexadecimal data.''') algorithm = auth_property('algorithm', doc=''' A string indicating a pair of algorithms used to produce the digest and a checksum. If this is not present it is assumed to be "MD5". If the algorithm is not understood, the challenge should be ignored (and a different one used, if there is more than one).''') qop = _set_property('qop', doc=''' A set of quality-of-privacy directives such as auth and auth-int.''') def _get_stale(self): val = self.get('stale') if val is not None: return val.lower() == 'true' def _set_stale(self, value): if value is None: self.pop('stale', None) else: self['stale'] = value and 'TRUE' or 'FALSE' stale = property(_get_stale, _set_stale, doc=''' A flag, indicating that the previous request from the client was rejected because the nonce value was stale.''') del _get_stale, _set_stale # make auth_property a staticmethod so that subclasses of # `WWWAuthenticate` can use it for new properties. auth_property = staticmethod(auth_property) del _set_property class FileStorage(object): """The :class:`FileStorage` class is a thin wrapper over incoming files. It is used by the request object to represent uploaded files. All the attributes of the wrapper stream are proxied by the file storage so it's possible to do ``storage.read()`` instead of the long form ``storage.stream.read()``. """ def __init__(self, stream=None, filename=None, name=None, content_type=None, content_length=None, headers=None): self.name = name self.stream = stream or _empty_stream # if no filename is provided we can attempt to get the filename # from the stream object passed. There we have to be careful to # skip things like <fdopen>, <stderr> etc. Python marks these # special filenames with angular brackets. if filename is None: filename = getattr(stream, 'name', None) s = make_literal_wrapper(filename) if filename and filename[0] == s('<') and filename[-1] == s('>'): filename = None # On Python 3 we want to make sure the filename is always unicode. # This might not be if the name attribute is bytes due to the # file being opened from the bytes API. if not PY2 and isinstance(filename, bytes): filename = filename.decode(get_filesystem_encoding(), 'replace') self.filename = filename if headers is None: headers = Headers() self.headers = headers if content_type is not None: headers['Content-Type'] = content_type if content_length is not None: headers['Content-Length'] = str(content_length) def _parse_content_type(self): if not hasattr(self, '_parsed_content_type'): self._parsed_content_type = \ parse_options_header(self.content_type) @property def content_type(self): """The content-type sent in the header. Usually not available""" return self.headers.get('content-type') @property def content_length(self): """The content-length sent in the header. Usually not available""" return int(self.headers.get('content-length') or 0) @property def mimetype(self): """Like :attr:`content_type`, but without parameters (eg, without charset, type etc.) and always lowercase. For example if the content type is ``text/HTML; charset=utf-8`` the mimetype would be ``'text/html'``. .. versionadded:: 0.7 """ self._parse_content_type() return self._parsed_content_type[0].lower() @property def mimetype_params(self): """The mimetype parameters as dict. For example if the content type is ``text/html; charset=utf-8`` the params would be ``{'charset': 'utf-8'}``. .. versionadded:: 0.7 """ self._parse_content_type() return self._parsed_content_type[1] def save(self, dst, buffer_size=16384): """Save the file to a destination path or file object. If the destination is a file object you have to close it yourself after the call. The buffer size is the number of bytes held in memory during the copy process. It defaults to 16KB. For secure file saving also have a look at :func:`secure_filename`. :param dst: a filename or open file object the uploaded file is saved to. :param buffer_size: the size of the buffer. This works the same as the `length` parameter of :func:`shutil.copyfileobj`. """ from shutil import copyfileobj close_dst = False if isinstance(dst, string_types): dst = open(dst, 'wb') close_dst = True try: copyfileobj(self.stream, dst, buffer_size) finally: if close_dst: dst.close() def close(self): """Close the underlying file if possible.""" try: self.stream.close() except Exception: pass def __nonzero__(self): return bool(self.filename) __bool__ = __nonzero__ def __getattr__(self, name): return getattr(self.stream, name) def __iter__(self): return iter(self.stream) def __repr__(self): return '<%s: %r (%r)>' % ( self.__class__.__name__, self.filename, self.content_type ) # circular dependencies from werkzeug.http import dump_options_header, dump_header, generate_etag, \ quote_header_value, parse_set_header, unquote_etag, quote_etag, \ parse_options_header, http_date, is_byte_range_valid from werkzeug import exceptions
eliasrg/SURF2017
refs/heads/master
interactive.py
1
# Copyright (c) 2017 Elias Riedel Gårding # Licensed under the MIT License import sys, os sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'code'))) from itertools import islice, count import numpy as np import scipy.stats as st import matplotlib.pyplot as plt from scipy.integrate import quad from collections import defaultdict from simulation import Simulation, Parameters from measurements import Measurement from plotting import plot_lloyd_max, plot_lloyd_max_tracker, \ plot_lloyd_max_hikmet, plot_lloyd_max_tracker_hikmet, \ plot_spiral, plot_spiral_decode import separate.coding.source.lloyd_max as lm from separate.coding.convolutional import ConvolutionalCode, Node, \ NaiveMLDecoder, StackDecoder import separate.coding.PAM as PAM from utilities import * from joint.coding import SpiralMap n_runs = 1 << 0 T = 1 << 7 params = None # SNR_dB = 4.5 # SNR = 10**(SNR_dB / 10) # params = Parameters( # T = T, # alpha = 1.2, # W = 1, V = 0, # Lloyd-Max paper assumes no observation noise # Q = 1, R = 0, F = 1) # params.setRates(KC = 2, KS = 1) # params.setAnalog(SNR) # params.setScheme('joint') # print("SDR0 = {}".format(params.SDR0)) # params.setDigital(quantizer_bits = 1) # params.setScheme('lloyd-max') # code_blocklength = None # params.setDigital(quantizer_bits = 1, p = 0.001) # params.setBlocklength(2) # params.setScheme('noisy-lloyd-max') # params.set_random_code() # params.setRates(KC = 2, KS = 1) # params.setAnalog(SNR) # params.quantizer_bits = 1 # params.setBlocklength(2) # params.set_PAM() # params.setScheme('separate') # params.set_random_code() def generate_filename(SNR_dB, alpha, i, quantizer_bits=1, code_blocklength=1): if SNR_dB == 'noiseless': filename_pattern = \ 'data/separate/varying-SNR/alpha{}/noiseless/noiseless--{{}}.p' \ .format(alpha) else: filename_pattern = 'data/separate/varying-SNR/alpha{}/{}:{}/{}dB--{{}}.p' \ .format(alpha, quantizer_bits, code_blocklength, SNR_dB) return filename_pattern.format(i) def load_measurements(SNR_dB, alpha=1.2, quantizer_bits=1, code_blocklength=2): results = [] for i in count(1): filename = generate_filename(SNR_dB, alpha, i, quantizer_bits, code_blocklength) if os.path.isfile(filename): results.append(Measurement.load(filename)) else: break return results def simulate_and_record(params): # Take measurement bad = False if SNR_dB != 'noiseless': params.set_random_code() # Use different codes each time try: simulate(params) except (ValueError, TypeError): input("Bad! :( ") bad = True # Generate filename for i in count(1): filename = 'bad-' if bad else '' filename += generate_filename(SNR_dB, params.alpha, i, params.quantizer_bits, params.code_blocklength) if not os.path.isfile(filename): break print("Saving to {}".format(filename)) measurements[-1].save(filename) measurements = [] def simulate(params=params, get_noise_record=lambda: None, plots=False): global sim, measurements for i in range(n_runs): sim = Simulation(params, get_noise_record()) measurement = Measurement(params) measurements.append(measurement) if plots: tracker = sim.encoder.get_tracker().clone() prev_distr = tracker.distr prev_lm_encoder = tracker.lm_encoder prev_lm_decoder = tracker.lm_decoder try: for t in sim.simulate(T): measurement.record(sim) if plots: if t == 1: if hasattr(prev_distr, 'is_hikmet'): plot_lloyd_max_hikmet(prev_distr, prev_lm_encoder.boundaries, prev_lm_decoder.levels, x_hit=sim.plant.x) else: plot_lloyd_max(prev_distr, prev_lm_encoder, prev_lm_decoder, x_hit=sim.plant.x) else: if hasattr(prev_distr, 'is_hikmet'): plot_lloyd_max_tracker_hikmet(prev_distr, prev_lm_encoder.boundaries, prev_lm_decoder.levels, tracker.d1, tracker.fw, x_hit=sim.plant.x) else: plot_lloyd_max_tracker(prev_distr, prev_lm_encoder, prev_lm_decoder, tracker, x_hit=sim.plant.x) tracker = sim.encoder.get_tracker().clone() prev_distr = tracker.distr prev_lm_encoder = tracker.lm_encoder prev_lm_decoder = tracker.lm_decoder print("Run {:d}, t = {:d} done".format(i, t)) except KeyboardInterrupt: print("Keyboard interrupt!") print(" Average power over channel: {:.4f}".format( sim.channel.average_power())) globals().update(params.all()) # Bring parameters into scope def plot(average=True): figure = plt.figure() for measurement in measurements if not average \ else [Measurement.average(measurements)]: plt.figure(figure.number) measurement.plot_setup() measurement.plot_LQG() measurement.plot_bounds() def generate_plot_lloyd_max(n_levels): distr = st.norm enc, dec = lm.generate(n_levels, distr) plot_lloyd_max(distr, enc, dec) def test_update(i=4): global tracker from separate.coding.source import DistributionTracker tracker = DistributionTracker(sim, 10) plot_lloyd_max(tracker.distr, tracker.lm_encoder, tracker.lm_decoder) tracker.update(i, debug_globals=globals()) plot_lloyd_max(tracker.distr, tracker.lm_encoder, tracker.lm_decoder) def plot_compare(): jscc = Measurement.load('data/joint/alpha_1.001_SNR_2_KC_32-runs.p') separate1 = Measurement.load('data/separate/alpha_1.001_SNR_2_KC_2--1.p') separate2 = Measurement.load('data/separate/alpha_1.001_SNR_2_KC_2--2.p') plt.figure() jscc.plot_setup() # Plot in the right order so that the legend reads top-down separate1.plot_LQG("Separation, single run") separate2.plot_LQG("Separation, single run") jscc.plot_LQG("Spiral JSCC, 32-run average") jscc.plot_bounds(upper_label="Theoretical prediction (spiral JSCC)") plt.legend() plt.text(25, 5, jscc.params.text_description(), bbox={'facecolor': 'white', 'edgecolor': 'gray'}) def plot_compare_2(): jscc_avg = Measurement.load('data/joint/alpha_1.5_SNR_2_KC_2_256-runs.p') jscc = Measurement.load('data/comparison/alpha_1.5_SNR_2_KC_2--1-joint.p') sep = Measurement.load('data/comparison/alpha_1.5_SNR_2_KC_2--1-separate.p') jscc.plot_setup() sep.plot_LQG("Tandem with (2-PAM)$^2$") sep.plot_correctly_decoded() jscc.plot_LQG("Spiral JSCC, same noise sequences") jscc_avg.plot_LQG("Spiral JSCC, 256-run average") jscc.plot_bounds(upper_label="Theoretical prediction (spiral JSCC)") plt.legend(loc=(.55, .48)) plt.text(40, 1.6, jscc.params.text_description(), bbox={'facecolor': 'white', 'edgecolor': 'gray'}) def plot_compare_3(): import matplotlib matplotlib.rcParams.update({'font.size': 20, 'lines.linewidth': 3}) jscc = Measurement.load('data/comparison/alpha_1.2_SNR_4.5dB_KC_2--1-joint.p') sep = Measurement.load('data/comparison/alpha_1.2_SNR_4.5dB_KC_2--1-separate.p') jscc.plot_setup(label="t") sep.plot_LQG("2-PAM", ':') sep.plot_correctly_decoded(y=-15) jscc.plot_LQG("Spiral", '-') jscc.plot_bounds(upper_label="Spiral: analytic", lower_label="OPTA", upper_args=['-.'], lower_args=['--']) plt.legend(loc=(.4, .1)) def plot_varying_SNR(alpha, multi=False, log_outside=True): plt.figure() def closest(n): return min(256, 512, 1024, 1536, 2048, key=lambda x: abs(x - n)) # SNR_dBs = [9, 10, 10.5, 11, 11.5, 12, 13, 13.5, 14, 15, 17, 20, 23, 25] # SNR_dBs = [7.5, 8, 8.5, 9, 10, 10.25, 10.5, 10.75, 11, 11.25, 11.5, 11.75, 12, 13, # 14, 15, 16, 'noiseless'] if not multi: # SNR_dBs = sorted([ # # 10, 10.25, 10.5, 10.75, # 11, 11.25, 11.5, 11.75, # 12, 12.25, 12.5, 12.75, # 13, 13.5, 13.75, # 14, 15, 16 # ]) SNR_dBs = sorted( # [1, 1.5, 2, 2.5, 3, 3.5] + [4, 4.25, 4.5, 5, 5.5, 6, 6.5, 7, 15, 25]) ms = {SNR_dB: load_measurements(SNR_dB, alpha) for SNR_dB in SNR_dBs} else: # SNR_dBs = sorted([30, 25, 24.5, 24, 23.5, 23, 22.5, 22, 20, 17.5, 15, 12.5, 10]) # SNR_dBs = sorted([8, 25, 24.5, 24, 23.5, 23, 22.75, 22.5, 22]) SNR_dBs = sorted([7, 8, 8.5, 9, 9.5, 10, 11, 12, 15, 25]) ms = {SNR_dB: load_measurements(SNR_dB, alpha, 2, 4) for SNR_dB in SNR_dBs} if log_outside: LQGlog10s = [10 * np.log10(np.mean([m.LQG[-1] for m in ms[SNR_dB]])) for SNR_dB in SNR_dBs] else: LQGlog10s = [np.mean([10 * np.log10(m.LQG[-1]) for m in ms[SNR_dB]]) for SNR_dB in SNR_dBs] plt.grid() SNR_dBs += [None] LQGlog10s += [None] plt.scatter(SNR_dBs[:-1], LQGlog10s[:-1]) # (without noiseless) plt.xlabel("SNR [dB]") plt.ylabel("Average final average cost [dB]") del SNR_dBs[-1], LQGlog10s[-1] for SNR_dB, LQGlog10 in zip(SNR_dBs, LQGlog10s): print("{:5}dB: {} ({} runs)".format(SNR_dB, LQGlog10, len(ms[SNR_dB]) ))#, # closest(len(ms[SNR_dB])))) def show(delay=0): if delay != 0: from time import sleep sleep(delay) plt.show(block=False) def take_data(): global SNR_dB for alpha in [1.2]: #, 1.2]: for SNR_dB in [ # 11, 12 # 8.5, 9.5 15, 25 ]: SNR = 10**(SNR_dB / 10) params = Parameters( T = T, alpha = alpha, W = 1, V = 0, # Lloyd-Max paper assumes no observation noise Q = 1, R = 0, F = 1) params.setRates(KC = 2, KS = 1) params.setAnalog(SNR) params.quantizer_bits = 1 params.setBlocklength(2) params.set_PAM() params.setScheme('separate') params.set_random_code() N = 256 n = defaultdict(lambda: N, {3: 2 * 256 - 280}) for _ in range(n[SNR_dB]): try: simulate_and_record(params) except KeyboardInterrupt: break except (ValueError, TypeError): pass del measurements[:]
Vaidyanath/tempest
refs/heads/master
tempest/api/compute/admin/test_aggregates.py
1
# Copyright 2013 NEC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest_lib import exceptions as lib_exc from tempest.api.compute import base from tempest.common import tempest_fixtures as fixtures from tempest.common.utils import data_utils from tempest import test class AggregatesAdminTestJSON(base.BaseV2ComputeAdminTest): """ Tests Aggregates API that require admin privileges """ _host_key = 'OS-EXT-SRV-ATTR:host' @classmethod def resource_setup(cls): super(AggregatesAdminTestJSON, cls).resource_setup() cls.client = cls.os_adm.aggregates_client cls.aggregate_name_prefix = 'test_aggregate_' cls.az_name_prefix = 'test_az_' hosts_all = cls.os_adm.hosts_client.list_hosts() hosts = map(lambda x: x['host_name'], filter(lambda y: y['service'] == 'compute', hosts_all)) cls.host = hosts[0] def _try_delete_aggregate(self, aggregate_id): # delete aggregate, if it exists try: self.client.delete_aggregate(aggregate_id) # if aggregate not found, it depict it was deleted in the test except lib_exc.NotFound: pass @test.attr(type='gate') def test_aggregate_create_delete(self): # Create and delete an aggregate. aggregate_name = data_utils.rand_name(self.aggregate_name_prefix) aggregate = self.client.create_aggregate(name=aggregate_name) self.addCleanup(self._try_delete_aggregate, aggregate['id']) self.assertEqual(aggregate_name, aggregate['name']) self.assertIsNone(aggregate['availability_zone']) self.client.delete_aggregate(aggregate['id']) self.client.wait_for_resource_deletion(aggregate['id']) @test.attr(type='gate') def test_aggregate_create_delete_with_az(self): # Create and delete an aggregate. aggregate_name = data_utils.rand_name(self.aggregate_name_prefix) az_name = data_utils.rand_name(self.az_name_prefix) aggregate = self.client.create_aggregate( name=aggregate_name, availability_zone=az_name) self.addCleanup(self._try_delete_aggregate, aggregate['id']) self.assertEqual(aggregate_name, aggregate['name']) self.assertEqual(az_name, aggregate['availability_zone']) self.client.delete_aggregate(aggregate['id']) self.client.wait_for_resource_deletion(aggregate['id']) @test.attr(type='gate') def test_aggregate_create_verify_entry_in_list(self): # Create an aggregate and ensure it is listed. aggregate_name = data_utils.rand_name(self.aggregate_name_prefix) aggregate = self.client.create_aggregate(name=aggregate_name) self.addCleanup(self.client.delete_aggregate, aggregate['id']) aggregates = self.client.list_aggregates() self.assertIn((aggregate['id'], aggregate['availability_zone']), map(lambda x: (x['id'], x['availability_zone']), aggregates)) @test.attr(type='gate') def test_aggregate_create_update_metadata_get_details(self): # Create an aggregate and ensure its details are returned. aggregate_name = data_utils.rand_name(self.aggregate_name_prefix) aggregate = self.client.create_aggregate(name=aggregate_name) self.addCleanup(self.client.delete_aggregate, aggregate['id']) body = self.client.get_aggregate(aggregate['id']) self.assertEqual(aggregate['name'], body['name']) self.assertEqual(aggregate['availability_zone'], body['availability_zone']) self.assertEqual({}, body["metadata"]) # set the metadata of the aggregate meta = {"key": "value"} body = self.client.set_metadata(aggregate['id'], meta) self.assertEqual(meta, body["metadata"]) # verify the metadata has been set body = self.client.get_aggregate(aggregate['id']) self.assertEqual(meta, body["metadata"]) @test.attr(type='gate') def test_aggregate_create_update_with_az(self): # Update an aggregate and ensure properties are updated correctly aggregate_name = data_utils.rand_name(self.aggregate_name_prefix) az_name = data_utils.rand_name(self.az_name_prefix) aggregate = self.client.create_aggregate( name=aggregate_name, availability_zone=az_name) self.addCleanup(self.client.delete_aggregate, aggregate['id']) self.assertEqual(aggregate_name, aggregate['name']) self.assertEqual(az_name, aggregate['availability_zone']) self.assertIsNotNone(aggregate['id']) aggregate_id = aggregate['id'] new_aggregate_name = aggregate_name + '_new' new_az_name = az_name + '_new' resp_aggregate = self.client.update_aggregate(aggregate_id, new_aggregate_name, new_az_name) self.assertEqual(new_aggregate_name, resp_aggregate['name']) self.assertEqual(new_az_name, resp_aggregate['availability_zone']) aggregates = self.client.list_aggregates() self.assertIn((aggregate_id, new_aggregate_name, new_az_name), map(lambda x: (x['id'], x['name'], x['availability_zone']), aggregates)) @test.attr(type='gate') def test_aggregate_add_remove_host(self): # Add an host to the given aggregate and remove. self.useFixture(fixtures.LockFixture('availability_zone')) aggregate_name = data_utils.rand_name(self.aggregate_name_prefix) aggregate = self.client.create_aggregate(name=aggregate_name) self.addCleanup(self.client.delete_aggregate, aggregate['id']) body = self.client.add_host(aggregate['id'], self.host) self.assertEqual(aggregate_name, body['name']) self.assertEqual(aggregate['availability_zone'], body['availability_zone']) self.assertIn(self.host, body['hosts']) body = self.client.remove_host(aggregate['id'], self.host) self.assertEqual(aggregate_name, body['name']) self.assertEqual(aggregate['availability_zone'], body['availability_zone']) self.assertNotIn(self.host, body['hosts']) @test.attr(type='gate') def test_aggregate_add_host_list(self): # Add an host to the given aggregate and list. self.useFixture(fixtures.LockFixture('availability_zone')) aggregate_name = data_utils.rand_name(self.aggregate_name_prefix) aggregate = self.client.create_aggregate(name=aggregate_name) self.addCleanup(self.client.delete_aggregate, aggregate['id']) self.client.add_host(aggregate['id'], self.host) self.addCleanup(self.client.remove_host, aggregate['id'], self.host) aggregates = self.client.list_aggregates() aggs = filter(lambda x: x['id'] == aggregate['id'], aggregates) self.assertEqual(1, len(aggs)) agg = aggs[0] self.assertEqual(aggregate_name, agg['name']) self.assertIsNone(agg['availability_zone']) self.assertIn(self.host, agg['hosts']) @test.attr(type='gate') def test_aggregate_add_host_get_details(self): # Add an host to the given aggregate and get details. self.useFixture(fixtures.LockFixture('availability_zone')) aggregate_name = data_utils.rand_name(self.aggregate_name_prefix) aggregate = self.client.create_aggregate(name=aggregate_name) self.addCleanup(self.client.delete_aggregate, aggregate['id']) self.client.add_host(aggregate['id'], self.host) self.addCleanup(self.client.remove_host, aggregate['id'], self.host) body = self.client.get_aggregate(aggregate['id']) self.assertEqual(aggregate_name, body['name']) self.assertIsNone(body['availability_zone']) self.assertIn(self.host, body['hosts']) @test.attr(type='gate') def test_aggregate_add_host_create_server_with_az(self): # Add an host to the given aggregate and create a server. self.useFixture(fixtures.LockFixture('availability_zone')) aggregate_name = data_utils.rand_name(self.aggregate_name_prefix) az_name = data_utils.rand_name(self.az_name_prefix) aggregate = self.client.create_aggregate( name=aggregate_name, availability_zone=az_name) self.addCleanup(self.client.delete_aggregate, aggregate['id']) self.client.add_host(aggregate['id'], self.host) self.addCleanup(self.client.remove_host, aggregate['id'], self.host) server_name = data_utils.rand_name('test_server_') admin_servers_client = self.os_adm.servers_client server = self.create_test_server(name=server_name, availability_zone=az_name, wait_until='ACTIVE') body = admin_servers_client.get_server(server['id']) self.assertEqual(self.host, body[self._host_key])
akosyakov/intellij-community
refs/heads/master
python/testData/intentions/afterReturnTypeInPy3Annotation2.py
166
def my_func(p1=1) -> object: return p1 d = my_func(1)
michellemorales/OpenMM
refs/heads/master
models/skip_thoughts/skip_thoughts/encoder_manager.py
14
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Manager class for loading and encoding with multiple skip-thoughts models. If multiple models are loaded at once then the encode() function returns the concatenation of the outputs of each model. Example usage: manager = EncoderManager() manager.load_model(model_config_1, vocabulary_file_1, embedding_matrix_file_1, checkpoint_path_1) manager.load_model(model_config_2, vocabulary_file_2, embedding_matrix_file_2, checkpoint_path_2) encodings = manager.encode(data) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np import tensorflow as tf from skip_thoughts import skip_thoughts_encoder class EncoderManager(object): """Manager class for loading and encoding with skip-thoughts models.""" def __init__(self): self.encoders = [] self.sessions = [] def load_model(self, model_config, vocabulary_file, embedding_matrix_file, checkpoint_path): """Loads a skip-thoughts model. Args: model_config: Object containing parameters for building the model. vocabulary_file: Path to vocabulary file containing a list of newline- separated words where the word id is the corresponding 0-based index in the file. embedding_matrix_file: Path to a serialized numpy array of shape [vocab_size, embedding_dim]. checkpoint_path: SkipThoughtsModel checkpoint file or a directory containing a checkpoint file. """ tf.logging.info("Reading vocabulary from %s", vocabulary_file) with tf.gfile.GFile(vocabulary_file, mode="r") as f: lines = list(f.readlines()) reverse_vocab = [line.decode("utf-8").strip() for line in lines] tf.logging.info("Loaded vocabulary with %d words.", len(reverse_vocab)) tf.logging.info("Loading embedding matrix from %s", embedding_matrix_file) # Note: tf.gfile.GFile doesn't work here because np.load() calls f.seek() # with 3 arguments. with open(embedding_matrix_file, "r") as f: embedding_matrix = np.load(f) tf.logging.info("Loaded embedding matrix with shape %s", embedding_matrix.shape) word_embeddings = collections.OrderedDict( zip(reverse_vocab, embedding_matrix)) g = tf.Graph() with g.as_default(): encoder = skip_thoughts_encoder.SkipThoughtsEncoder(word_embeddings) restore_model = encoder.build_graph_from_config(model_config, checkpoint_path) sess = tf.Session(graph=g) restore_model(sess) self.encoders.append(encoder) self.sessions.append(sess) def encode(self, data, use_norm=True, verbose=False, batch_size=128, use_eos=False): """Encodes a sequence of sentences as skip-thought vectors. Args: data: A list of input strings. use_norm: If True, normalize output skip-thought vectors to unit L2 norm. verbose: Whether to log every batch. batch_size: Batch size for the RNN encoders. use_eos: If True, append the end-of-sentence word to each input sentence. Returns: thought_vectors: A list of numpy arrays corresponding to 'data'. Raises: ValueError: If called before calling load_encoder. """ if not self.encoders: raise ValueError( "Must call load_model at least once before calling encode.") encoded = [] for encoder, sess in zip(self.encoders, self.sessions): encoded.append( np.array( encoder.encode( sess, data, use_norm=use_norm, verbose=verbose, batch_size=batch_size, use_eos=use_eos))) return np.concatenate(encoded, axis=1) def close(self): """Closes the active TensorFlow Sessions.""" for sess in self.sessions: sess.close()
Symfomany/Horus
refs/heads/master
py/geopy/geopy/geohash.py
82
from geopy import Point class Geohash(object): ENCODE_MAP = '0123456789bcdefghjkmnpqrstuvwxyz' DECODE_MAP = dict([(char, i) for i, char in enumerate(ENCODE_MAP)]) def __init__(self, point_class=Point, precision=12): self.point_class = point_class self.precision = precision def encode(self, *args, **kwargs): precision = kwargs.pop('precision', self.precision) point = Point(*args, **kwargs) lat_min, latitude, lat_max = -90, 0, 90 long_min, longitude, long_max = -180, 0, 180 string = '' bytes = [] odd_bit = False for i in xrange(precision): byte = 0 for bit in (16, 8, 4, 2, 1): if odd_bit: if point.latitude >= latitude: byte |= bit lat_min = latitude else: lat_max = latitude latitude = (lat_min + lat_max) / 2. else: if point.longitude >= longitude: byte |= bit long_min = longitude else: long_max = longitude longitude = (long_min + long_max) / 2. odd_bit = not odd_bit bytes.append(byte) return ''.join([self.ENCODE_MAP[byte] for byte in bytes]) def decode(self, string): lat_min, latitude, lat_max = -90, 0, 90 long_min, longitude, long_max = -180, 0, 180 odd_bit = False for char in string: try: byte = self.DECODE_MAP[char] except KeyError: raise ValueError("Invalid hash: unexpected character %r." % (c,)) else: for bit in (16, 8, 4, 2, 1): if odd_bit: if byte & bit: lat_min = latitude else: lat_max = latitude latitude = (lat_min + lat_max) / 2. else: if byte & bit: long_min = longitude else: long_max = longitude longitude = (long_min + long_max) / 2. odd_bit = not odd_bit point = self.point_class((latitude, longitude)) point.error = (lat_max - latitude, long_max - longitude) return point
rickerc/nova_audit
refs/heads/cis-havana-staging
nova/tests/cells/test_cells_rpc_driver.py
11
# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Cells RPC Communication Driver """ import urlparse from oslo.config import cfg from nova.cells import messaging from nova.cells import rpc_driver from nova import context from nova.openstack.common import rpc from nova.openstack.common.rpc import dispatcher as rpc_dispatcher from nova import test from nova.tests.cells import fakes CONF = cfg.CONF CONF.import_opt('rpc_driver_queue_base', 'nova.cells.rpc_driver', group='cells') class CellsRPCDriverTestCase(test.NoDBTestCase): """Test case for Cells communication via RPC.""" def setUp(self): super(CellsRPCDriverTestCase, self).setUp() fakes.init(self) self.ctxt = context.RequestContext('fake', 'fake') self.driver = rpc_driver.CellsRPCDriver() def test_start_consumers(self): self.flags(rpc_driver_queue_base='cells.intercell42', group='cells') rpc_consumers = [] rpc_conns = [] fake_msg_runner = fakes.get_message_runner('api-cell') call_info = {} class FakeInterCellRPCDispatcher(object): def __init__(_self, msg_runner): self.assertEqual(fake_msg_runner, msg_runner) call_info['intercell_dispatcher'] = _self class FakeRPCDispatcher(object): def __init__(_self, proxy_objs): self.assertEqual([call_info['intercell_dispatcher']], proxy_objs) call_info['rpc_dispatcher'] = _self class FakeRPCConn(object): def create_consumer(_self, topic, proxy_obj, **kwargs): self.assertEqual(call_info['rpc_dispatcher'], proxy_obj) rpc_consumers.append((topic, kwargs)) def consume_in_thread(_self): pass def _fake_create_connection(new): self.assertTrue(new) fake_conn = FakeRPCConn() rpc_conns.append(fake_conn) return fake_conn self.stubs.Set(rpc, 'create_connection', _fake_create_connection) self.stubs.Set(rpc_driver, 'InterCellRPCDispatcher', FakeInterCellRPCDispatcher) self.stubs.Set(rpc_dispatcher, 'RpcDispatcher', FakeRPCDispatcher) self.driver.start_consumers(fake_msg_runner) for message_type in ['broadcast', 'response', 'targeted']: topic = 'cells.intercell42.' + message_type self.assertIn((topic, {'fanout': True}), rpc_consumers) self.assertIn((topic, {'fanout': False}), rpc_consumers) self.assertEqual(rpc_conns, self.driver.rpc_connections) def test_stop_consumers(self): call_info = {'closed': []} class FakeRPCConn(object): def close(self): call_info['closed'].append(self) fake_conns = [FakeRPCConn() for x in xrange(5)] self.driver.rpc_connections = fake_conns self.driver.stop_consumers() self.assertEqual(fake_conns, call_info['closed']) def test_send_message_to_cell_cast(self): msg_runner = fakes.get_message_runner('api-cell') cell_state = fakes.get_cell_state('api-cell', 'child-cell2') message = messaging._TargetedMessage(msg_runner, self.ctxt, 'fake', {}, 'down', cell_state, fanout=False) call_info = {} def _fake_make_msg(method, namespace, **kwargs): call_info['rpc_method'] = method call_info['rpc_kwargs'] = kwargs return 'fake-message' def _fake_cast_to_server(*args, **kwargs): call_info['cast_args'] = args call_info['cast_kwargs'] = kwargs self.stubs.Set(rpc, 'cast_to_server', _fake_cast_to_server) self.stubs.Set(self.driver.intercell_rpcapi, 'make_namespaced_msg', _fake_make_msg) self.stubs.Set(self.driver.intercell_rpcapi, 'cast_to_server', _fake_cast_to_server) self.driver.send_message_to_cell(cell_state, message) expected_server_params = {'hostname': 'rpc_host2', 'password': 'password2', 'port': 3092, 'username': 'username2', 'virtual_host': 'rpc_vhost2'} expected_cast_args = (self.ctxt, expected_server_params, 'fake-message') expected_cast_kwargs = {'topic': 'cells.intercell.targeted'} expected_rpc_kwargs = {'message': message.to_json()} self.assertEqual(expected_cast_args, call_info['cast_args']) self.assertEqual(expected_cast_kwargs, call_info['cast_kwargs']) self.assertEqual('process_message', call_info['rpc_method']) self.assertEqual(expected_rpc_kwargs, call_info['rpc_kwargs']) def test_send_message_to_cell_fanout_cast(self): msg_runner = fakes.get_message_runner('api-cell') cell_state = fakes.get_cell_state('api-cell', 'child-cell2') message = messaging._TargetedMessage(msg_runner, self.ctxt, 'fake', {}, 'down', cell_state, fanout=True) call_info = {} def _fake_make_msg(method, namespace, **kwargs): call_info['rpc_method'] = method call_info['rpc_kwargs'] = kwargs return 'fake-message' def _fake_fanout_cast_to_server(*args, **kwargs): call_info['cast_args'] = args call_info['cast_kwargs'] = kwargs self.stubs.Set(rpc, 'fanout_cast_to_server', _fake_fanout_cast_to_server) self.stubs.Set(self.driver.intercell_rpcapi, 'make_namespaced_msg', _fake_make_msg) self.stubs.Set(self.driver.intercell_rpcapi, 'fanout_cast_to_server', _fake_fanout_cast_to_server) self.driver.send_message_to_cell(cell_state, message) expected_server_params = {'hostname': 'rpc_host2', 'password': 'password2', 'port': 3092, 'username': 'username2', 'virtual_host': 'rpc_vhost2'} expected_cast_args = (self.ctxt, expected_server_params, 'fake-message') expected_cast_kwargs = {'topic': 'cells.intercell.targeted'} expected_rpc_kwargs = {'message': message.to_json()} self.assertEqual(expected_cast_args, call_info['cast_args']) self.assertEqual(expected_cast_kwargs, call_info['cast_kwargs']) self.assertEqual('process_message', call_info['rpc_method']) self.assertEqual(expected_rpc_kwargs, call_info['rpc_kwargs']) def test_rpc_topic_uses_message_type(self): self.flags(rpc_driver_queue_base='cells.intercell42', group='cells') msg_runner = fakes.get_message_runner('api-cell') cell_state = fakes.get_cell_state('api-cell', 'child-cell2') message = messaging._BroadcastMessage(msg_runner, self.ctxt, 'fake', {}, 'down', fanout=True) message.message_type = 'fake-message-type' call_info = {} def _fake_fanout_cast_to_server(*args, **kwargs): call_info['topic'] = kwargs.get('topic') self.stubs.Set(self.driver.intercell_rpcapi, 'fanout_cast_to_server', _fake_fanout_cast_to_server) self.driver.send_message_to_cell(cell_state, message) self.assertEqual('cells.intercell42.fake-message-type', call_info['topic']) def test_process_message(self): msg_runner = fakes.get_message_runner('api-cell') dispatcher = rpc_driver.InterCellRPCDispatcher(msg_runner) message = messaging._BroadcastMessage(msg_runner, self.ctxt, 'fake', {}, 'down', fanout=True) call_info = {} def _fake_message_from_json(json_message): call_info['json_message'] = json_message self.assertEqual(message.to_json(), json_message) return message def _fake_process(): call_info['process_called'] = True self.stubs.Set(msg_runner, 'message_from_json', _fake_message_from_json) self.stubs.Set(message, 'process', _fake_process) dispatcher.process_message(self.ctxt, message.to_json()) self.assertEqual(message.to_json(), call_info['json_message']) self.assertTrue(call_info['process_called']) class ParseTransportURLTestCase(test.NoDBTestCase): def test_bad_scheme(self): url = "bad:///" self.assertRaises(ValueError, rpc_driver.parse_transport_url, url) def test_query_string(self): url = "rabbit://u:p@h:10/virtual?ssl=1" self.assertRaises(ValueError, rpc_driver.parse_transport_url, url) def test_query_string_old_urlparse(self): # Test parse_transport_url with urlparse.urlparse behaving as in python # 2.7.3 or below. See https://bugs.launchpad.net/nova/+bug/1202149 url = "rabbit://u:p@h:10/virtual?ssl=1" parse_result = urlparse.ParseResult( scheme='rabbit', netloc='u:p@h:10', path='/virtual?ssl=1', params='', query='', fragment='' ) self.mox.StubOutWithMock(urlparse, 'urlparse') urlparse.urlparse(url).AndReturn(parse_result) self.mox.ReplayAll() self.assertRaises(ValueError, rpc_driver.parse_transport_url, url) def test_query_string_new_urlparse(self): # Test parse_transport_url with urlparse.urlparse behaving as in python # 2.7.4 or above. See https://bugs.launchpad.net/nova/+bug/1202149 url = "rabbit://u:p@h:10/virtual?ssl=1" parse_result = urlparse.ParseResult( scheme='rabbit', netloc='u:p@h:10', path='/virtual', params='', query='ssl=1', fragment='' ) self.mox.StubOutWithMock(urlparse, 'urlparse') urlparse.urlparse(url).AndReturn(parse_result) self.mox.ReplayAll() self.assertRaises(ValueError, rpc_driver.parse_transport_url, url) def test_empty(self): url = "rabbit:" result = rpc_driver.parse_transport_url(url) self.assertEqual(result, { 'username': None, 'password': None, 'hostname': None, 'port': None, 'virtual_host': None, }) def test_normal_parsing(self): url = "rabbit://us%65r:p%61ss@host.example.com:10/virtual%5fhost" result = rpc_driver.parse_transport_url(url) self.assertEqual(result, { 'username': 'user', 'password': 'pass', 'hostname': 'host.example.com', 'port': 10, 'virtual_host': 'virtual_host', }) def test_normal_ipv6_parsing(self): url = "rabbit://us%65r:p%61ss@[ffff::1]:10/virtual%5fhost" result = rpc_driver.parse_transport_url(url) self.assertEqual(result, { 'username': 'user', 'password': 'pass', 'hostname': 'ffff::1', 'port': 10, 'virtual_host': 'virtual_host', }) def test_normal_parsing_no_port(self): url = "rabbit://us%65r:p%61ss@host.example.com/virtual%5fhost" result = rpc_driver.parse_transport_url(url) self.assertEqual(result, { 'username': 'user', 'password': 'pass', 'hostname': 'host.example.com', 'port': None, 'virtual_host': 'virtual_host', }) def test_normal_ipv6_parsing_no_port(self): url = "rabbit://us%65r:p%61ss@[ffff::1]/virtual%5fhost" result = rpc_driver.parse_transport_url(url) self.assertEqual(result, { 'username': 'user', 'password': 'pass', 'hostname': 'ffff::1', 'port': None, 'virtual_host': 'virtual_host', }) def test_invalid_ipv6_parsing(self): url = "rabbit://user:pass@[ffff::1/virtual_host" self.assertRaises(ValueError, rpc_driver.parse_transport_url, url) class UnparseTransportURLTestCase(test.NoDBTestCase): def test_empty(self): result = rpc_driver.unparse_transport_url({}) self.assertEqual(result, "rabbit:///") def test_username_only(self): result = rpc_driver.unparse_transport_url({'username': 'user/'}) self.assertEqual(result, "rabbit://user%2F@/") def test_password_only(self): result = rpc_driver.unparse_transport_url({'password': 'pass/'}) self.assertEqual(result, "rabbit://:pass%2F@/") def test_hostname_only(self): result = rpc_driver.unparse_transport_url({'hostname': 'example.com'}) self.assertEqual(result, "rabbit://example.com/") def test_hostname_v6_only(self): result = rpc_driver.unparse_transport_url({'hostname': 'ffff::1'}) self.assertEqual(result, "rabbit://[ffff::1]/") def test_port_only(self): result = rpc_driver.unparse_transport_url({'port': 2345}) self.assertEqual(result, "rabbit://:2345/") def test_virtual_host_only(self): result = rpc_driver.unparse_transport_url({'virtual_host': 'virtual/'}) self.assertEqual(result, "rabbit:///virtual%2F") def test_complete_secure(self): transport = { 'username': 'user', 'password': 'pass', 'hostname': 'example.com', 'port': 2345, 'virtual_host': 'virtual', } result = rpc_driver.unparse_transport_url(transport) self.assertEqual(result, "rabbit://user:pass@example.com:2345/virtual") def test_complete_insecure(self): transport = { 'username': 'user', 'password': 'pass', 'hostname': 'example.com', 'port': 2345, 'virtual_host': 'virtual', } result = rpc_driver.unparse_transport_url(transport, False) self.assertEqual(result, "rabbit://user@example.com:2345/virtual")
msm8660coolstuff/android_kernel_samsung_msm8660
refs/heads/cm-11.0
arch/ia64/scripts/unwcheck.py
13143
#!/usr/bin/python # # Usage: unwcheck.py FILE # # This script checks the unwind info of each function in file FILE # and verifies that the sum of the region-lengths matches the total # length of the function. # # Based on a shell/awk script originally written by Harish Patil, # which was converted to Perl by Matthew Chapman, which was converted # to Python by David Mosberger. # import os import re import sys if len(sys.argv) != 2: print "Usage: %s FILE" % sys.argv[0] sys.exit(2) readelf = os.getenv("READELF", "readelf") start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]") rlen_pattern = re.compile(".*rlen=([0-9]+)") def check_func (func, slots, rlen_sum): if slots != rlen_sum: global num_errors num_errors += 1 if not func: func = "[%#x-%#x]" % (start, end) print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum) return num_funcs = 0 num_errors = 0 func = False slots = 0 rlen_sum = 0 for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): m = start_pattern.match(line) if m: check_func(func, slots, rlen_sum) func = m.group(1) start = long(m.group(2), 16) end = long(m.group(3), 16) slots = 3 * (end - start) / 16 rlen_sum = 0L num_funcs += 1 else: m = rlen_pattern.match(line) if m: rlen_sum += long(m.group(1)) check_func(func, slots, rlen_sum) if num_errors == 0: print "No errors detected in %u functions." % num_funcs else: if num_errors > 1: err="errors" else: err="error" print "%u %s detected in %u functions." % (num_errors, err, num_funcs) sys.exit(1)
fake-name/ReadableWebProxy
refs/heads/master
WebMirror/management/rss_parser_funcs/feed_parse_extractBlastronDoesSomeThings.py
1
def extractBlastronDoesSomeThings(item): """ Parser for 'Blastron Does Some Things' """ vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or 'preview' in item['title'].lower(): return None tagmap = [ ('honzuki no gekokujou', 'honzuki no gekokujou', 'translated'), ('kumo desu ga nani ka', 'kumo desu ga nani ka', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
jjo/openvpn-ipv6-legacy21
refs/heads/master
win/js.py
5
import json # usage: # print JSON().encode(kv) class JSON(json.JSONEncoder): def __init__(self, **kwargs): args = dict(sort_keys=True, indent=2) args.update(kwargs) json.JSONEncoder.__init__(self, **args)
willingc/oh-mainline
refs/heads/master
vendor/packages/python-social-auth/social/backends/rdio.py
79
""" Rdio OAuth1 and OAuth2 backends, docs at: http://psa.matiasaguirre.net/docs/backends/rdio.html """ from social.backends.oauth import BaseOAuth1, BaseOAuth2, OAuthAuth RDIO_API = 'https://www.rdio.com/api/1/' class BaseRdio(OAuthAuth): ID_KEY = 'key' def get_user_details(self, response): fullname, first_name, last_name = self.get_user_names( fullname=response['displayName'], first_name=response['firstName'], last_name=response['lastName'] ) return { 'username': response['username'], 'fullname': fullname, 'first_name': first_name, 'last_name': last_name } class RdioOAuth1(BaseRdio, BaseOAuth1): """Rdio OAuth authentication backend""" name = 'rdio-oauth1' REQUEST_TOKEN_URL = 'http://api.rdio.com/oauth/request_token' AUTHORIZATION_URL = 'https://www.rdio.com/oauth/authorize' ACCESS_TOKEN_URL = 'http://api.rdio.com/oauth/access_token' EXTRA_DATA = [ ('key', 'rdio_id'), ('icon', 'rdio_icon_url'), ('url', 'rdio_profile_url'), ('username', 'rdio_username'), ('streamRegion', 'rdio_stream_region'), ] def user_data(self, access_token, *args, **kwargs): """Return user data provided""" params = {'method': 'currentUser', 'extras': 'username,displayName,streamRegion'} request = self.oauth_request(access_token, RDIO_API, params, method='POST') return self.get_json(request.url, method='POST', data=request.to_postdata())['result'] class RdioOAuth2(BaseRdio, BaseOAuth2): name = 'rdio-oauth2' AUTHORIZATION_URL = 'https://www.rdio.com/oauth2/authorize' ACCESS_TOKEN_URL = 'https://www.rdio.com/oauth2/token' ACCESS_TOKEN_METHOD = 'POST' EXTRA_DATA = [ ('key', 'rdio_id'), ('icon', 'rdio_icon_url'), ('url', 'rdio_profile_url'), ('username', 'rdio_username'), ('streamRegion', 'rdio_stream_region'), ('refresh_token', 'refresh_token', True), ('token_type', 'token_type', True), ] def user_data(self, access_token, *args, **kwargs): return self.get_json(RDIO_API, method='POST', data={ 'method': 'currentUser', 'extras': 'username,displayName,streamRegion', 'access_token': access_token })['result']
dursk/django
refs/heads/master
tests/model_formsets_regress/tests.py
173
from __future__ import unicode_literals from django import forms from django.forms.formsets import DELETION_FIELD_NAME, BaseFormSet from django.forms.models import ( BaseModelFormSet, inlineformset_factory, modelform_factory, modelformset_factory, ) from django.forms.utils import ErrorDict, ErrorList from django.test import TestCase from django.utils import six from .models import ( Host, Manager, Network, ProfileNetwork, Restaurant, User, UserProfile, UserSite, ) class InlineFormsetTests(TestCase): def test_formset_over_to_field(self): "A formset over a ForeignKey with a to_field can be saved. Regression for #10243" Form = modelform_factory(User, fields="__all__") FormSet = inlineformset_factory(User, UserSite, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a form with no data form = Form() form_set = FormSet(instance=User()) # Now create a new User and UserSite instance data = { 'serial': '1', 'username': 'apollo13', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '0', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-data': '10', 'usersite_set-0-user': 'apollo13' } user = User() form = Form(data) if form.is_valid(): user = form.save() else: self.fail('Errors found on form:%s' % form_set) form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values() self.assertEqual(usersite[0]['data'], 10) self.assertEqual(usersite[0]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now update the UserSite instance data = { 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-id': six.text_type(usersite[0]['id']), 'usersite_set-0-data': '11', 'usersite_set-0-user': 'apollo13' } form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values() self.assertEqual(usersite[0]['data'], 11) self.assertEqual(usersite[0]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now add a new UserSite instance data = { 'usersite_set-TOTAL_FORMS': '2', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-id': six.text_type(usersite[0]['id']), 'usersite_set-0-data': '11', 'usersite_set-0-user': 'apollo13', 'usersite_set-1-data': '42', 'usersite_set-1-user': 'apollo13' } form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values().order_by('data') self.assertEqual(usersite[0]['data'], 11) self.assertEqual(usersite[0]['user_id'], 'apollo13') self.assertEqual(usersite[1]['data'], 42) self.assertEqual(usersite[1]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) def test_formset_over_inherited_model(self): "A formset over a ForeignKey with a to_field can be saved. Regression for #11120" Form = modelform_factory(Restaurant, fields="__all__") FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a form with no data form = Form() form_set = FormSet(instance=Restaurant()) # Now create a new Restaurant and Manager instance data = { 'name': "Guido's House of Pasta", 'manager_set-TOTAL_FORMS': '1', 'manager_set-INITIAL_FORMS': '0', 'manager_set-MAX_NUM_FORMS': '0', 'manager_set-0-name': 'Guido Van Rossum' } restaurant = User() form = Form(data) if form.is_valid(): restaurant = form.save() else: self.fail('Errors found on form:%s' % form_set) form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values() self.assertEqual(manager[0]['name'], 'Guido Van Rossum') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now update the Manager instance data = { 'manager_set-TOTAL_FORMS': '1', 'manager_set-INITIAL_FORMS': '1', 'manager_set-MAX_NUM_FORMS': '0', 'manager_set-0-id': six.text_type(manager[0]['id']), 'manager_set-0-name': 'Terry Gilliam' } form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values() self.assertEqual(manager[0]['name'], 'Terry Gilliam') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now add a new Manager instance data = { 'manager_set-TOTAL_FORMS': '2', 'manager_set-INITIAL_FORMS': '1', 'manager_set-MAX_NUM_FORMS': '0', 'manager_set-0-id': six.text_type(manager[0]['id']), 'manager_set-0-name': 'Terry Gilliam', 'manager_set-1-name': 'John Cleese' } form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values().order_by('name') self.assertEqual(manager[0]['name'], 'John Cleese') self.assertEqual(manager[1]['name'], 'Terry Gilliam') else: self.fail('Errors found on formset:%s' % form_set.errors) def test_inline_model_with_to_field(self): """ #13794 --- An inline model with a to_field of a formset with instance has working relations. """ FormSet = inlineformset_factory(User, UserSite, exclude=('is_superuser',)) user = User.objects.create(username="guido", serial=1337) UserSite.objects.create(user=user, data=10) formset = FormSet(instance=user) # Testing the inline model's relation self.assertEqual(formset[0].instance.user_id, "guido") def test_inline_model_with_to_field_to_rel(self): """ #13794 --- An inline model with a to_field to a related field of a formset with instance has working relations. """ FormSet = inlineformset_factory(UserProfile, ProfileNetwork, exclude=[]) user = User.objects.create(username="guido", serial=1337, pk=1) self.assertEqual(user.pk, 1) profile = UserProfile.objects.create(user=user, about="about", pk=2) self.assertEqual(profile.pk, 2) ProfileNetwork.objects.create(profile=profile, network=10, identifier=10) formset = FormSet(instance=profile) # Testing the inline model's relation self.assertEqual(formset[0].instance.profile_id, 1) def test_formset_with_none_instance(self): "A formset with instance=None can be created. Regression for #11872" Form = modelform_factory(User, fields="__all__") FormSet = inlineformset_factory(User, UserSite, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a formset with an instance of None Form(instance=None) FormSet(instance=None) def test_empty_fields_on_modelformset(self): """ No fields passed to modelformset_factory() should result in no fields on returned forms except for the id (#14119). """ UserFormSet = modelformset_factory(User, fields=()) formset = UserFormSet() for form in formset.forms: self.assertIn('id', form.fields) self.assertEqual(len(form.fields), 1) def test_save_as_new_with_new_inlines(self): """ Existing and new inlines are saved with save_as_new. Regression for #14938. """ efnet = Network.objects.create(name="EFNet") host1 = Host.objects.create(hostname="irc.he.net", network=efnet) HostFormSet = inlineformset_factory(Network, Host, fields="__all__") # Add a new host, modify previous host, and save-as-new data = { 'host_set-TOTAL_FORMS': '2', 'host_set-INITIAL_FORMS': '1', 'host_set-MAX_NUM_FORMS': '0', 'host_set-0-id': six.text_type(host1.id), 'host_set-0-hostname': 'tranquility.hub.dal.net', 'host_set-1-hostname': 'matrix.de.eu.dal.net' } # To save a formset as new, it needs a new hub instance dalnet = Network.objects.create(name="DALnet") formset = HostFormSet(data, instance=dalnet, save_as_new=True) self.assertTrue(formset.is_valid()) formset.save() self.assertQuerysetEqual( dalnet.host_set.order_by("hostname"), ["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"] ) def test_initial_data(self): user = User.objects.create(username="bibi", serial=1) UserSite.objects.create(user=user, data=7) FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__") formset = FormSet(instance=user, initial=[{'data': 41}, {'data': 42}]) self.assertEqual(formset.forms[0].initial['data'], 7) self.assertEqual(formset.extra_forms[0].initial['data'], 41) self.assertIn('value="42"', formset.extra_forms[1].as_p()) class FormsetTests(TestCase): def test_error_class(self): ''' Test the type of Formset and Form error attributes ''' Formset = modelformset_factory(User, fields="__all__") data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '0', 'form-0-id': '', 'form-0-username': 'apollo13', 'form-0-serial': '1', 'form-1-id': '', 'form-1-username': 'apollo13', 'form-1-serial': '2', } formset = Formset(data) # check if the returned error classes are correct # note: formset.errors returns a list as documented self.assertIsInstance(formset.errors, list) self.assertIsInstance(formset.non_form_errors(), ErrorList) for form in formset.forms: self.assertIsInstance(form.errors, ErrorDict) self.assertIsInstance(form.non_field_errors(), ErrorList) def test_initial_data(self): User.objects.create(username="bibi", serial=1) Formset = modelformset_factory(User, fields="__all__", extra=2) formset = Formset(initial=[{'username': 'apollo11'}, {'username': 'apollo12'}]) self.assertEqual(formset.forms[0].initial['username'], "bibi") self.assertEqual(formset.extra_forms[0].initial['username'], "apollo11") self.assertIn('value="apollo12"', formset.extra_forms[1].as_p()) def test_extraneous_query_is_not_run(self): Formset = modelformset_factory(Network, fields="__all__") data = {'test-TOTAL_FORMS': '1', 'test-INITIAL_FORMS': '0', 'test-MAX_NUM_FORMS': '', 'test-0-name': 'Random Place', } with self.assertNumQueries(1): formset = Formset(data, prefix="test") formset.save() class CustomWidget(forms.widgets.TextInput): pass class UserSiteForm(forms.ModelForm): class Meta: model = UserSite fields = "__all__" widgets = { 'id': CustomWidget, 'data': CustomWidget, } localized_fields = ('data',) class Callback(object): def __init__(self): self.log = [] def __call__(self, db_field, **kwargs): self.log.append((db_field, kwargs)) return db_field.formfield(**kwargs) class FormfieldCallbackTests(TestCase): """ Regression for #13095 and #17683: Using base forms with widgets defined in Meta should not raise errors and BaseModelForm should respect the specified pk widget. """ def test_inlineformset_factory_default(self): Formset = inlineformset_factory(User, UserSite, form=UserSiteForm, fields="__all__") form = Formset().forms[0] self.assertIsInstance(form['id'].field.widget, CustomWidget) self.assertIsInstance(form['data'].field.widget, CustomWidget) self.assertFalse(form.fields['id'].localize) self.assertTrue(form.fields['data'].localize) def test_modelformset_factory_default(self): Formset = modelformset_factory(UserSite, form=UserSiteForm) form = Formset().forms[0] self.assertIsInstance(form['id'].field.widget, CustomWidget) self.assertIsInstance(form['data'].field.widget, CustomWidget) self.assertFalse(form.fields['id'].localize) self.assertTrue(form.fields['data'].localize) def assertCallbackCalled(self, callback): id_field, user_field, data_field = UserSite._meta.fields expected_log = [ (id_field, {'widget': CustomWidget}), (user_field, {}), (data_field, {'widget': CustomWidget, 'localize': True}), ] self.assertEqual(callback.log, expected_log) def test_inlineformset_custom_callback(self): callback = Callback() inlineformset_factory(User, UserSite, form=UserSiteForm, formfield_callback=callback, fields="__all__") self.assertCallbackCalled(callback) def test_modelformset_custom_callback(self): callback = Callback() modelformset_factory(UserSite, form=UserSiteForm, formfield_callback=callback) self.assertCallbackCalled(callback) class BaseCustomDeleteFormSet(BaseFormSet): """ A formset mix-in that lets a form decide if it's to be deleted. Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed. form.should_delete() is called. The formset delete field is also suppressed. """ def add_fields(self, form, index): super(BaseCustomDeleteFormSet, self).add_fields(form, index) self.can_delete = True if DELETION_FIELD_NAME in form.fields: del form.fields[DELETION_FIELD_NAME] def _should_delete_form(self, form): return hasattr(form, 'should_delete') and form.should_delete() class FormfieldShouldDeleteFormTests(TestCase): """ Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form """ class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet): """ Model FormSet with CustomDelete MixIn """ class CustomDeleteUserForm(forms.ModelForm): """ A model form with a 'should_delete' method """ class Meta: model = User fields = "__all__" def should_delete(self): """ delete form if odd PK """ return self.instance.pk % 2 != 0 NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True) DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet) data = { 'form-TOTAL_FORMS': '4', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '4', 'form-0-username': 'John', 'form-0-serial': '1', 'form-1-username': 'Paul', 'form-1-serial': '2', 'form-2-username': 'George', 'form-2-serial': '3', 'form-3-username': 'Ringo', 'form-3-serial': '5', } delete_all_ids = { 'form-0-DELETE': '1', 'form-1-DELETE': '1', 'form-2-DELETE': '1', 'form-3-DELETE': '1', } def test_init_database(self): """ Add test data to database via formset """ formset = self.NormalFormset(self.data) self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 4) def test_no_delete(self): """ Verify base formset doesn't modify database """ # reload database self.test_init_database() # pass standard data dict & see none updated data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update({ 'form-%d-id' % i: user.pk for i, user in enumerate(User.objects.all()) }) formset = self.NormalFormset(data, queryset=User.objects.all()) self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 4) def test_all_delete(self): """ Verify base formset honors DELETE field """ # reload database self.test_init_database() # create data dict with all fields marked for deletion data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update({ 'form-%d-id' % i: user.pk for i, user in enumerate(User.objects.all()) }) data.update(self.delete_all_ids) formset = self.NormalFormset(data, queryset=User.objects.all()) self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 0) def test_custom_delete(self): """ Verify DeleteFormset ignores DELETE field and uses form method """ # reload database self.test_init_database() # Create formset with custom Delete function # create data dict with all fields marked for deletion data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update({ 'form-%d-id' % i: user.pk for i, user in enumerate(User.objects.all()) }) data.update(self.delete_all_ids) formset = self.DeleteFormset(data, queryset=User.objects.all()) # verify two were deleted self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 2) # verify no "odd" PKs left odd_ids = [user.pk for user in User.objects.all() if user.pk % 2] self.assertEqual(len(odd_ids), 0) class RedeleteTests(TestCase): def test_resubmit(self): u = User.objects.create(username='foo', serial=1) us = UserSite.objects.create(user=u, data=7) formset_cls = inlineformset_factory(User, UserSite, fields="__all__") data = { 'serial': '1', 'username': 'foo', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '1', 'usersite_set-0-id': six.text_type(us.pk), 'usersite_set-0-data': '7', 'usersite_set-0-user': 'foo', 'usersite_set-0-DELETE': '1' } formset = formset_cls(data, instance=u) self.assertTrue(formset.is_valid()) formset.save() self.assertEqual(UserSite.objects.count(), 0) formset = formset_cls(data, instance=u) # Even if the "us" object isn't in the DB any more, the form # validates. self.assertTrue(formset.is_valid()) formset.save() self.assertEqual(UserSite.objects.count(), 0) def test_delete_already_deleted(self): u = User.objects.create(username='foo', serial=1) us = UserSite.objects.create(user=u, data=7) formset_cls = inlineformset_factory(User, UserSite, fields="__all__") data = { 'serial': '1', 'username': 'foo', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '1', 'usersite_set-0-id': six.text_type(us.pk), 'usersite_set-0-data': '7', 'usersite_set-0-user': 'foo', 'usersite_set-0-DELETE': '1' } formset = formset_cls(data, instance=u) us.delete() self.assertTrue(formset.is_valid()) formset.save() self.assertEqual(UserSite.objects.count(), 0)
Hasimir/pyjs
refs/heads/master
pyjswidgets/pyjamas/chart/TouchedPointUpdateOption.py
7
""" * Copyright 2007,2008,2009 John C. Gunther * Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net> * * Licensed under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http:#www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the * License. * """ """* * Defines how the <tt>update</tt> method updates the touched * point, that is, the point the user is considered to be * hovered over. * * @see #update(TouchedPointUpdateOption) update * """ class TouchedPointUpdateOption(object): def __init__(self): pass """* * When this option is passed to the update method, any * touched point is cleared as a consequence of the update. * <p> * * This option can be used when you want to "start fresh" * with regards to hover feedback after an update, and want * to assure that only explicit user-generated mouse move * actions (rather than objects moving <i>underneath</i> a * fixed-position mouse cursor) can trigger hover feedback. * * @see #update update * @see #TOUCHED_POINT_LOCKED TOUCHED_POINT_LOCKED * @see #TOUCHED_POINT_UPDATED TOUCHED_POINT_UPDATED * """ TOUCHED_POINT_CLEARED = TouchedPointUpdateOption() """* * When this option is passed to the update method, any * previously touched point is locked in (remains unchanged). * <p> * * For example, if the mouse is over a certain point before * the update, and that point moves away from the mouse * (without the mouse moving otherwise) as a consequence of * the update, the hover feedback remains "locked in" to the * original point, even though the mouse is no longer on top * of that point. * <p> * * This option is useful for hover widgets that modify the * position, size, symbol of points/curves, and do not want the * selected point/curve (and popup hover widget) to change as * a consequence of such changes. * <p> * * <i>Note:</i> If the currently touched point or the curve * containing it is deleted, GChart sets the touched point * reference to <tt>None</tt>. In that case, this option and * <tt>TOUCHED_POINT_CLEARED</tt> behave the same way. * * * @see #update update * @see #TOUCHED_POINT_CLEARED TOUCHED_POINT_CLEARED * @see #TOUCHED_POINT_UPDATED TOUCHED_POINT_UPDATED * """ TOUCHED_POINT_LOCKED = TouchedPointUpdateOption() """* * When this option is passed to the update method, the * touched point is updated so that it reflects whatever point * is underneath the mouse cursor after the update * completes. * <p> * * For example, if the mouse is not hovering over any point * before the update, but the update repositions one of the * points so that it is now underneath the mouse cursor, * the hover feedback for that point will be displayed. * Similarly, if the update moves a point away from the * mouse cursor, previously displayed hover feedback will * be eliminated. * <p> * * @see #update update * @see #TOUCHED_POINT_CLEARED TOUCHED_POINT_CLEARED * @see #TOUCHED_POINT_LOCKED TOUCHED_POINT_LOCKED * """ TOUCHED_POINT_UPDATED = TouchedPointUpdateOption()
wetneb/django
refs/heads/master
django/core/mail/backends/dummy.py
835
""" Dummy email backend that does nothing. """ from django.core.mail.backends.base import BaseEmailBackend class EmailBackend(BaseEmailBackend): def send_messages(self, email_messages): return len(list(email_messages))
logston/cunidecode
refs/heads/master
data/x02c.py
246
data = ( '', # 0x00 '', # 0x01 '', # 0x02 '', # 0x03 '', # 0x04 '', # 0x05 '', # 0x06 '', # 0x07 '', # 0x08 '', # 0x09 '', # 0x0a '', # 0x0b '', # 0x0c '', # 0x0d '', # 0x0e '', # 0x0f '', # 0x10 '', # 0x11 '', # 0x12 '', # 0x13 '', # 0x14 '', # 0x15 '', # 0x16 '', # 0x17 '', # 0x18 '', # 0x19 '', # 0x1a '', # 0x1b '', # 0x1c '', # 0x1d '', # 0x1e '', # 0x1f '', # 0x20 '', # 0x21 '', # 0x22 '', # 0x23 '', # 0x24 '', # 0x25 '', # 0x26 '', # 0x27 '', # 0x28 '', # 0x29 '', # 0x2a '', # 0x2b '', # 0x2c '', # 0x2d '', # 0x2e '', # 0x2f '', # 0x30 '', # 0x31 '', # 0x32 '', # 0x33 '', # 0x34 '', # 0x35 '', # 0x36 '', # 0x37 '', # 0x38 '', # 0x39 '', # 0x3a '', # 0x3b '', # 0x3c '', # 0x3d '', # 0x3e '', # 0x3f '', # 0x40 '', # 0x41 '', # 0x42 '', # 0x43 '', # 0x44 '', # 0x45 '', # 0x46 '', # 0x47 '', # 0x48 '', # 0x49 '', # 0x4a '', # 0x4b '', # 0x4c '', # 0x4d '', # 0x4e '', # 0x4f '', # 0x50 '', # 0x51 '', # 0x52 '', # 0x53 '', # 0x54 '', # 0x55 '', # 0x56 '', # 0x57 '', # 0x58 '', # 0x59 '', # 0x5a '', # 0x5b '', # 0x5c '', # 0x5d '', # 0x5e '', # 0x5f 'L', # 0x60 'l', # 0x61 'L', # 0x62 'P', # 0x63 'R', # 0x64 'a', # 0x65 't', # 0x66 'H', # 0x67 'h', # 0x68 'K', # 0x69 'k', # 0x6a 'Z', # 0x6b 'z', # 0x6c '', # 0x6d 'M', # 0x6e 'A', # 0x6f '', # 0x70 '', # 0x71 '', # 0x72 '', # 0x73 '', # 0x74 '', # 0x75 '', # 0x76 '', # 0x77 '', # 0x78 '', # 0x79 '', # 0x7a '', # 0x7b '', # 0x7c '', # 0x7d '', # 0x7e '', # 0x7f '', # 0x80 '', # 0x81 '', # 0x82 '', # 0x83 '', # 0x84 '', # 0x85 '', # 0x86 '', # 0x87 '', # 0x88 '', # 0x89 '', # 0x8a '', # 0x8b '', # 0x8c '', # 0x8d '', # 0x8e '', # 0x8f '', # 0x90 '', # 0x91 '', # 0x92 '', # 0x93 '', # 0x94 '', # 0x95 '', # 0x96 '', # 0x97 '', # 0x98 '', # 0x99 '', # 0x9a '', # 0x9b '', # 0x9c '', # 0x9d '', # 0x9e '', # 0x9f '', # 0xa0 '', # 0xa1 '', # 0xa2 '', # 0xa3 '', # 0xa4 '', # 0xa5 '', # 0xa6 '', # 0xa7 '', # 0xa8 '', # 0xa9 '', # 0xaa '', # 0xab '', # 0xac '', # 0xad '', # 0xae '', # 0xaf '', # 0xb0 '', # 0xb1 '', # 0xb2 '', # 0xb3 '', # 0xb4 '', # 0xb5 '', # 0xb6 '', # 0xb7 '', # 0xb8 '', # 0xb9 '', # 0xba '', # 0xbb '', # 0xbc '', # 0xbd '', # 0xbe '', # 0xbf '', # 0xc0 '', # 0xc1 '', # 0xc2 '', # 0xc3 '', # 0xc4 '', # 0xc5 '', # 0xc6 '', # 0xc7 '', # 0xc8 '', # 0xc9 '', # 0xca '', # 0xcb '', # 0xcc '', # 0xcd '', # 0xce '', # 0xcf '', # 0xd0 '', # 0xd1 '', # 0xd2 '', # 0xd3 '', # 0xd4 '', # 0xd5 '', # 0xd6 '', # 0xd7 '', # 0xd8 '', # 0xd9 '', # 0xda '', # 0xdb '', # 0xdc '', # 0xdd '', # 0xde '', # 0xdf '', # 0xe0 '', # 0xe1 '', # 0xe2 '', # 0xe3 '', # 0xe4 '', # 0xe5 '', # 0xe6 '', # 0xe7 '', # 0xe8 '', # 0xe9 '', # 0xea '', # 0xeb '', # 0xec '', # 0xed '', # 0xee '', # 0xef '', # 0xf0 '', # 0xf1 '', # 0xf2 '', # 0xf3 '', # 0xf4 '', # 0xf5 '', # 0xf6 '', # 0xf7 '', # 0xf8 '', # 0xf9 '', # 0xfa '', # 0xfb '', # 0xfc '', # 0xfd '', # 0xfe )
mioann47/mobile-app-privacy-analyzer
refs/heads/master
mypythonscripts/tools/modified/androguard/core/api_specific_resources/aosp_permissions/aosp_permissions_api15.py
27
#!/usr/bin/python # -*- coding: utf-8 -*- ################################################# ### Extracted from platform version: 4.0.4 ################################################# AOSP_PERMISSIONS = { 'android.permission.BIND_WALLPAPER' : {'permissionGroup' : '', 'description' : 'Allows the holder to bind to the top-level interface of a wallpaper. Should never be needed for normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'bind to a wallpaper'}, 'android.permission.FORCE_BACK' : {'permissionGroup' : '', 'description' : 'Allows an application to force any activity that is in the foreground to close and go back. Should never be needed for normal applications.', 'protectionLevel' : 'signature', 'label' : 'force application to close'}, 'android.permission.READ_CALENDAR' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows an application to read all calendar events stored on your phone, including those of friends or coworkers. A malicious application with this permission can extract personal information from these calendars without the owners\' knowledge.', 'protectionLevel' : 'dangerous', 'label' : 'read calendar events plus confidential information'}, 'android.permission.READ_FRAME_BUFFER' : {'permissionGroup' : '', 'description' : 'Allows application to read the content of the frame buffer.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'read frame buffer'}, 'android.permission.READ_SOCIAL_STREAM' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows the application to access and sync social updates from you and your friends. Malicious apps can use this to read private communications between you and your friends on social networks.', 'protectionLevel' : 'dangerous', 'label' : 'read your social stream'}, 'android.permission.ACCESS_MTP' : {'permissionGroup' : 'android.permission-group.HARDWARE_CONTROLS', 'description' : 'Allows access to the kernel MTP driver to implement the MTP USB protocol.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'implement MTP protocol'}, 'android.permission.READ_NETWORK_USAGE_HISTORY' : {'permissionGroup' : '', 'description' : 'Allows an application to read historical network usage for specific networks and applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'read historical network usage'}, 'android.permission.READ_SYNC_STATS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to read the sync stats; e.g., the history of syncs that have occurred.', 'protectionLevel' : 'normal', 'label' : 'read sync statistics'}, 'android.permission.SHUTDOWN' : {'permissionGroup' : '', 'description' : 'Puts the activity manager into a shutdown state. Does not perform a complete shutdown.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'partial shutdown'}, 'android.permission.ACCESS_NETWORK_STATE' : {'permissionGroup' : 'android.permission-group.NETWORK', 'description' : 'Allows an application to view the state of all networks.', 'protectionLevel' : 'normal', 'label' : 'view network state'}, 'android.permission.INTERNET' : {'permissionGroup' : 'android.permission-group.NETWORK', 'description' : 'Allows an application to create network sockets.', 'protectionLevel' : 'dangerous', 'label' : 'full Internet access'}, 'android.permission.CHANGE_CONFIGURATION' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to change the current configuration, such as the locale or overall font size.', 'protectionLevel' : 'dangerous', 'label' : 'change your UI settings'}, 'android.permission.READ_CONTACTS' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows an application to read all of the contact (address) data stored on your phone. Malicious applications can use this to send your data to other people.', 'protectionLevel' : 'dangerous', 'label' : 'read contact data'}, 'android.permission.HARDWARE_TEST' : {'permissionGroup' : 'android.permission-group.HARDWARE_CONTROLS', 'description' : 'Allows the application to control various peripherals for the purpose of hardware testing.', 'protectionLevel' : 'signature', 'label' : 'test hardware'}, 'android.permission.SEND_DOWNLOAD_COMPLETED_INTENTS' : {'permissionGroup' : '', 'description' : 'Allows the app to send notifications about completed downloads. Malicious apps can use this to confuse other apps that download files.', 'protectionLevel' : 'signature', 'label' : 'Send download notifications.'}, 'com.android.launcher.permission.INSTALL_SHORTCUT' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an app to add shortcuts without user intervention.', 'protectionLevel' : 'normal', 'label' : 'install shortcuts'}, 'android.permission.BIND_VPN_SERVICE' : {'permissionGroup' : '', 'description' : 'Allows the holder to bind to the top-level interface of a Vpn service. Should never be needed for normal applications.', 'protectionLevel' : 'signature', 'label' : 'bind to a VPN service'}, 'android.permission.CHANGE_WIFI_MULTICAST_STATE' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to receive packets not directly addressed to your device. This can be useful when discovering services offered near by. It uses more power than the non-multicast mode.', 'protectionLevel' : 'dangerous', 'label' : 'allow Wi-Fi Multicast reception'}, 'android.permission.VIBRATE' : {'permissionGroup' : 'android.permission-group.HARDWARE_CONTROLS', 'description' : 'Allows the application to control the vibrator.', 'protectionLevel' : 'normal', 'label' : 'control vibrator'}, 'android.permission.BIND_INPUT_METHOD' : {'permissionGroup' : '', 'description' : 'Allows the holder to bind to the top-level interface of an input method. Should never be needed for normal applications.', 'protectionLevel' : 'signature', 'label' : 'bind to an input method'}, 'android.permission.SET_TIME_ZONE' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to change the phone\'s time zone.', 'protectionLevel' : 'dangerous', 'label' : 'set time zone'}, 'android.permission.ACCESS_CACHE_FILESYSTEM' : {'permissionGroup' : '', 'description' : 'Allows an application to read and write the cache filesystem.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'access the cache filesystem'}, 'android.permission.DOWNLOAD_CACHE_NON_PURGEABLE' : {'permissionGroup' : '', 'description' : 'Allows the app to download files to the download cache, which can\'t be automatically deleted when the download manager needs more space.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'Reserve space in the download cache'}, 'android.permission.DUMP' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows application to retrieve internal state of the system. Malicious applications may retrieve a wide variety of private and secure information that they should never normally need.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'retrieve system internal state'}, 'android.permission.WRITE_USER_DICTIONARY' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows an application to write new words into the user dictionary.', 'protectionLevel' : 'normal', 'label' : 'write to user defined dictionary'}, 'android.permission.CRYPT_KEEPER' : {'permissionGroup' : '', 'description' : '', 'protectionLevel' : 'signatureOrSystem', 'label' : ''}, 'android.permission.READ_LOGS' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows an application to read from the system\'s various log files. This allows it to discover general information about what you are doing with the phone, potentially including personal or private information.', 'protectionLevel' : 'dangerous', 'label' : 'read sensitive log data'}, 'android.permission.WRITE_GSERVICES' : {'permissionGroup' : '', 'description' : 'Allows an application to modify the Google services map. Not for use by normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'modify the Google services map'}, 'android.permission.INJECT_EVENTS' : {'permissionGroup' : '', 'description' : 'Allows an application to deliver its own input events (key presses, etc.) to other applications. Malicious applications can use this to take over the phone.', 'protectionLevel' : 'signature', 'label' : 'press keys and control buttons'}, 'android.permission.BIND_DEVICE_ADMIN' : {'permissionGroup' : '', 'description' : 'Allows the holder to send intents to a device administrator. Should never be needed for normal applications.', 'protectionLevel' : 'signature', 'label' : 'interact with a device admin'}, 'android.permission.FORCE_STOP_PACKAGES' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to forcibly stop other applications.', 'protectionLevel' : 'signature', 'label' : 'force stop other applications'}, 'com.android.frameworks.coretests.permission.TEST_DENIED' : {'permissionGroup' : '', 'description' : 'Used for running unit tests, for testing operations where we do not have the permission.', 'protectionLevel' : 'normal', 'label' : 'Test Denied'}, 'android.permission.WRITE_SECURE_SETTINGS' : {'permissionGroup' : '', 'description' : 'Allows an application to modify the system\'s secure settings data. Not for use by normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'modify secure system settings'}, 'android.permission.UPDATE_DEVICE_STATS' : {'permissionGroup' : '', 'description' : 'Allows the modification of collected battery statistics. Not for use by normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'modify battery statistics'}, 'android.permission.BROADCAST_PACKAGE_REMOVED' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to broadcast a notification that an application package has been removed. Malicious applications may use this to kill any other running application.', 'protectionLevel' : 'signature', 'label' : 'send package removed broadcast'}, 'android.permission.SYSTEM_ALERT_WINDOW' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to show system alert windows. Malicious applications can take over the entire screen.', 'protectionLevel' : 'dangerous', 'label' : 'display system-level alerts'}, 'com.android.cts.permissionNotUsedWithSignature' : {'permissionGroup' : '', 'description' : '', 'protectionLevel' : 'signature', 'label' : ''}, 'android.permission.ACCESS_LOCATION_EXTRA_COMMANDS' : {'permissionGroup' : 'android.permission-group.LOCATION', 'description' : 'Access extra location provider commands. Malicious applications could use this to interfere with the operation of the GPS or other location sources.', 'protectionLevel' : 'normal', 'label' : 'access extra location provider commands'}, 'android.permission.BRICK' : {'permissionGroup' : '', 'description' : 'Allows the application to disable the entire phone permanently. This is very dangerous.', 'protectionLevel' : 'signature', 'label' : 'permanently disable phone'}, 'com.android.browser.permission.WRITE_HISTORY_BOOKMARKS' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows an application to modify the Browser\'s history or bookmarks stored on your phone. Malicious applications can use this to erase or modify your Browser\'s data.', 'protectionLevel' : 'dangerous', 'label' : 'write Browser\'s history and bookmarks'}, 'android.permission.CHANGE_WIFI_STATE' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to connect to and disconnect from Wi-Fi access points, and to make changes to configured Wi-Fi networks.', 'protectionLevel' : 'dangerous', 'label' : 'change Wi-Fi state'}, 'android.permission.RECORD_AUDIO' : {'permissionGroup' : 'android.permission-group.HARDWARE_CONTROLS', 'description' : 'Allows application to access the audio record path.', 'protectionLevel' : 'dangerous', 'label' : 'record audio'}, 'android.permission.MODIFY_PHONE_STATE' : {'permissionGroup' : 'android.permission-group.PHONE_CALLS', 'description' : 'Allows the application to control the phone features of the device. An application with this permission can switch networks, turn the phone radio on and off and the like without ever notifying you.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'modify phone state'}, 'android.permission.READ_PROFILE' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows the application to read personal profile information stored on your device, such as your name and contact information. This means the application can identify you and send your profile information to others.', 'protectionLevel' : 'dangerous', 'label' : 'read your profile data'}, 'android.permission.ACCOUNT_MANAGER' : {'permissionGroup' : 'android.permission-group.ACCOUNTS', 'description' : 'Allows an application to make calls to AccountAuthenticators', 'protectionLevel' : 'signature', 'label' : 'act as the AccountManagerService'}, 'android.permission.SET_ANIMATION_SCALE' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to change the global animation speed (faster or slower animations) at any time.', 'protectionLevel' : 'dangerous', 'label' : 'modify global animation speed'}, 'android.permission.SET_PROCESS_LIMIT' : {'permissionGroup' : 'android.permission-group.DEVELOPMENT_TOOLS', 'description' : 'Allows an application to control the maximum number of processes that will run. Never needed for normal applications.', 'protectionLevel' : 'dangerous', 'label' : 'limit number of running processes'}, 'android.permission.SET_PREFERRED_APPLICATIONS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to modify your preferred applications. This can allow malicious applications to silently change the applications that are run, spoofing your existing applications to collect private data from you.', 'protectionLevel' : 'signature', 'label' : 'set preferred applications'}, 'android.permission.WRITE_PROFILE' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows the application to change or add to personal profile information stored on your device, such as your name and contact information. This means other applications can identify you and send your profile information to others.', 'protectionLevel' : 'dangerous', 'label' : 'write to your profile data'}, 'android.permission.SET_DEBUG_APP' : {'permissionGroup' : 'android.permission-group.DEVELOPMENT_TOOLS', 'description' : 'Allows an application to turn on debugging for another application. Malicious applications can use this to kill other applications.', 'protectionLevel' : 'dangerous', 'label' : 'enable application debugging'}, 'android.permission.INSTALL_DRM' : {'permissionGroup' : '', 'description' : 'Allows application to install DRM-protected content.', 'protectionLevel' : 'normal', 'label' : 'Install DRM content.'}, 'android.permission.WRITE_SYNC_SETTINGS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to modify the sync settings, such as whether sync is enabled for Contacts.', 'protectionLevel' : 'dangerous', 'label' : 'write sync settings'}, 'android.permission.BLUETOOTH' : {'permissionGroup' : 'android.permission-group.NETWORK', 'description' : 'Allows an application to view configuration of the local Bluetooth phone, and to make and accept connections with paired devices.', 'protectionLevel' : 'dangerous', 'label' : 'create Bluetooth connections'}, 'android.permission.CAMERA' : {'permissionGroup' : 'android.permission-group.HARDWARE_CONTROLS', 'description' : 'Allows application to take pictures and videos with the camera. This allows the application at any time to collect images the camera is seeing.', 'protectionLevel' : 'dangerous', 'label' : 'take pictures and videos'}, 'android.permission.SET_WALLPAPER_HINTS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows the application to set the system wallpaper size hints.', 'protectionLevel' : 'normal', 'label' : 'set wallpaper size hints'}, 'android.permission.WAKE_LOCK' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to prevent the phone from going to sleep.', 'protectionLevel' : 'dangerous', 'label' : 'prevent phone from sleeping'}, 'com.android.frameworks.coretests.SIGNATURE' : {'permissionGroup' : 'android.permission-group.COST_MONEY', 'description' : '', 'protectionLevel' : 'signature', 'label' : ''}, 'android.permission.REBOOT' : {'permissionGroup' : '', 'description' : 'Allows the application to force the phone to reboot.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'force phone reboot'}, 'android.permission.READ_PRIVILEGED_PHONE_STATE' : {'permissionGroup' : 'android.permission-group.PHONE_CALLS', 'description' : '', 'protectionLevel' : 'signatureOrSystem', 'label' : ''}, 'android.permission.BROADCAST_WAP_PUSH' : {'permissionGroup' : 'android.permission-group.MESSAGES', 'description' : 'Allows an application to broadcast a notification that a WAP PUSH message has been received. Malicious applications may use this to forge MMS message receipt or to silently replace the content of any web page with malicious variants.', 'protectionLevel' : 'signature', 'label' : 'send WAP-PUSH-received broadcast'}, 'android.permission.SET_WALLPAPER_COMPONENT' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : '', 'protectionLevel' : 'signatureOrSystem', 'label' : ''}, 'android.permission.ACCESS_BLUETOOTH_SHARE' : {'permissionGroup' : '', 'description' : 'Allows the app to access the BluetoothShare manager and use it to transfer files.', 'protectionLevel' : 'signature', 'label' : 'Access download manager.'}, 'android.intent.category.MASTER_CLEAR.permission.C2D_MESSAGE' : {'permissionGroup' : '', 'description' : '', 'protectionLevel' : 'signature', 'label' : ''}, 'android.permission.STATUS_BAR' : {'permissionGroup' : '', 'description' : 'Allows application to disable the status bar or add and remove system icons.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'disable or modify status bar'}, 'android.permission.CHANGE_WIMAX_STATE' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to connect to and disconnect from WiMAX network.', 'protectionLevel' : 'dangerous', 'label' : 'change WiMAX state'}, 'com.android.browser.permission.READ_HISTORY_BOOKMARKS' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows the application to read all the URLs that the Browser has visited, and all of the Browser\'s bookmarks.', 'protectionLevel' : 'dangerous', 'label' : 'read Browser\'s history and bookmarks'}, 'android.permission.ACCESS_DRM' : {'permissionGroup' : '', 'description' : 'Allows application to access DRM-protected content.', 'protectionLevel' : 'signature', 'label' : 'Access DRM content.'}, 'android.permission.RECEIVE_SMS' : {'permissionGroup' : 'android.permission-group.MESSAGES', 'description' : 'Allows application to receive and process SMS messages. Malicious applications may monitor your messages or delete them without showing them to you.', 'protectionLevel' : 'dangerous', 'label' : 'receive SMS'}, 'android.permission.WRITE_CONTACTS' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows an application to modify the contact (address) data stored on your phone. Malicious applications can use this to erase or modify your contact data.', 'protectionLevel' : 'dangerous', 'label' : 'write contact data'}, 'android.permission.CONTROL_LOCATION_UPDATES' : {'permissionGroup' : '', 'description' : 'Allows enabling/disabling location update notifications from the radio. Not for use by normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'control location update notifications'}, 'android.permission.BIND_APPWIDGET' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows the application to tell the system which widgets can be used by which application. With this permission, applications can give access to personal data to other applications. Not for use by normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'choose widgets'}, 'com.android.frameworks.coretests.permission.TEST_GRANTED' : {'permissionGroup' : '', 'description' : 'Used for running unit tests, for testing operations where we have the permission.', 'protectionLevel' : 'normal', 'label' : 'Test Granted'}, 'android.permission.SIGNAL_PERSISTENT_PROCESSES' : {'permissionGroup' : 'android.permission-group.DEVELOPMENT_TOOLS', 'description' : 'Allows application to request that the supplied signal be sent to all persistent processes.', 'protectionLevel' : 'dangerous', 'label' : 'send Linux signals to applications'}, 'android.permission.ASEC_CREATE' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows the application to create internal storage.', 'protectionLevel' : 'signature', 'label' : 'create internal storage'}, 'android.permission.INSTALL_LOCATION_PROVIDER' : {'permissionGroup' : '', 'description' : 'Create mock location sources for testing. Malicious applications can use this to override the location and/or status returned by real location sources such as GPS or Network providers or monitor and report your location to an external source.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'permission to install a location provider'}, 'android.permission.ACCESS_DOWNLOAD_MANAGER_ADVANCED' : {'permissionGroup' : '', 'description' : 'Allows the app to access the download manager\'s advanced functions. Malicious apps can use this to disrupt downloads and access private information.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'Advanced download manager functions.'}, 'android.permission.WRITE_SETTINGS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to modify the system\'s settings data. Malicious applications can corrupt your system\'s configuration.', 'protectionLevel' : 'dangerous', 'label' : 'modify global system settings'}, 'android.permission.MASTER_CLEAR' : {'permissionGroup' : '', 'description' : 'Allows an application to completely reset the system to its factory settings, erasing all data, configuration, and installed applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'reset system to factory defaults'}, 'android.permission.READ_INPUT_STATE' : {'permissionGroup' : '', 'description' : 'Allows applications to watch the keys you press even when interacting with another application (such as entering a password). Should never be needed for normal applications.', 'protectionLevel' : 'signature', 'label' : 'record what you type and actions you take'}, 'android.permission.MANAGE_APP_TOKENS' : {'permissionGroup' : '', 'description' : 'Allows applications to create and manage their own tokens, bypassing their normal Z-ordering. Should never be needed for normal applications.', 'protectionLevel' : 'signature', 'label' : 'manage application tokens'}, 'com.android.email.permission.ACCESS_PROVIDER' : {'permissionGroup' : '', 'description' : 'Allows this application to access your email database, including received messages, sent messages, usernames, and passwords.', 'protectionLevel' : 'signature', 'label' : 'Access email provider data'}, 'android.permission.PACKAGE_VERIFICATION_AGENT' : {'permissionGroup' : '', 'description' : 'Allows the application to verify a package is installable.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'verify packages'}, 'android.permission.CONFIRM_FULL_BACKUP' : {'permissionGroup' : '', 'description' : 'Allows the application to launch the full backup confirmation UI. Not to be used by any application.', 'protectionLevel' : 'signature', 'label' : 'confirm a full backup or restore operation'}, 'com.android.smspush.WAPPUSH_MANAGER_BIND' : {'permissionGroup' : '', 'description' : '', 'protectionLevel' : 'signatureOrSystem', 'label' : ''}, 'android.permission.ACCESS_WIMAX_STATE' : {'permissionGroup' : 'android.permission-group.NETWORK', 'description' : 'Allows an application to view the information about the state of WiMAX.', 'protectionLevel' : 'normal', 'label' : 'view WiMAX state'}, 'com.android.launcher.permission.WRITE_SETTINGS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an app to change the settings and shortcuts in Home.', 'protectionLevel' : 'normal', 'label' : 'write Home settings and shortcuts'}, 'android.permission.MODIFY_AUDIO_SETTINGS' : {'permissionGroup' : 'android.permission-group.HARDWARE_CONTROLS', 'description' : 'Allows application to modify global audio settings such as volume and routing.', 'protectionLevel' : 'dangerous', 'label' : 'change your audio settings'}, 'android.permission.ASEC_ACCESS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows the application to get information on internal storage.', 'protectionLevel' : 'signature', 'label' : 'get information on internal storage'}, 'android.permission.USE_SIP' : {'permissionGroup' : 'android.permission-group.NETWORK', 'description' : 'Allows an application to use the SIP service to make/receive Internet calls.', 'protectionLevel' : 'dangerous', 'label' : 'make/receive Internet calls'}, 'android.permission.WRITE_APN_SETTINGS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to change network settings and to intercept and inspect all network traffic, for example to change the proxy and port of any APN. Malicious applications could monitor, redirect, or modify network packets without your knowledge.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'change/intercept network settings and traffic'}, 'android.permission.ACCESS_SURFACE_FLINGER' : {'permissionGroup' : '', 'description' : 'Allows application to use SurfaceFlinger low-level features.', 'protectionLevel' : 'signature', 'label' : 'access SurfaceFlinger'}, 'android.permission.MOVE_PACKAGE' : {'permissionGroup' : '', 'description' : 'Allows an application to move application resources from internal to external media and vice versa.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'Move application resources'}, 'android.permission.NET_ADMIN' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : '', 'protectionLevel' : 'signature', 'label' : ''}, 'android.permission.ALLOW_ANY_CODEC_FOR_PLAYBACK' : {'permissionGroup' : '', 'description' : 'Allows an application to use any installed media decoder to decode for playback.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'use any media decoder for playback'}, 'android.permission.MANAGE_USB' : {'permissionGroup' : 'android.permission-group.HARDWARE_CONTROLS', 'description' : 'Allows the application to manage preferences and permissions for USB devices.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'manage preferences and permissions for USB devices'}, 'android.permission.CHANGE_BACKGROUND_DATA_SETTING' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to change the background data usage setting.', 'protectionLevel' : 'signature', 'label' : 'change background data usage setting'}, 'android.permission.PROCESS_OUTGOING_CALLS' : {'permissionGroup' : 'android.permission-group.PHONE_CALLS', 'description' : 'Allows application to process outgoing calls and change the number to be dialed. Malicious applications may monitor, redirect, or prevent outgoing calls.', 'protectionLevel' : 'dangerous', 'label' : 'intercept outgoing calls'}, 'android.permission.CALL_PRIVILEGED' : {'permissionGroup' : '', 'description' : 'Allows the application to call any phone number, including emergency numbers, without your intervention. Malicious applications may place unnecessary and illegal calls to emergency services.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'directly call any phone numbers'}, 'android.permission.WRITE_CALENDAR' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows an application to send event invitations as the calendar owner and add, remove, change events that you can modify on your device, including those of friends or co-workers. A malicious application with this permission can send spam emails that appear to come from calendar owners, modify events without the owners\' knowledge, or add fake events.', 'protectionLevel' : 'dangerous', 'label' : 'add or modify calendar events and send email to guests without owners\' knowledge'}, 'android.permission.NFC' : {'permissionGroup' : 'android.permission-group.NETWORK', 'description' : 'Allows an application to communicate with Near Field Communication (NFC) tags, cards, and readers.', 'protectionLevel' : 'dangerous', 'label' : 'control Near Field Communication'}, 'android.permission.MANAGE_ACCOUNTS' : {'permissionGroup' : 'android.permission-group.ACCOUNTS', 'description' : 'Allows an application to perform operations like adding, and removing accounts and deleting their password.', 'protectionLevel' : 'dangerous', 'label' : 'manage the accounts list'}, 'android.permission.SEND_SMS' : {'permissionGroup' : 'android.permission-group.COST_MONEY', 'description' : 'Allows application to send SMS messages. Malicious applications may cost you money by sending messages without your confirmation.', 'protectionLevel' : 'dangerous', 'label' : 'send SMS messages'}, 'android.permission.BIND_REMOTEVIEWS' : {'permissionGroup' : '', 'description' : 'Allows the holder to bind to the top-level interface of a widget service. Should never be needed for normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'bind to a widget service'}, 'android.permission.ACCESS_MOCK_LOCATION' : {'permissionGroup' : 'android.permission-group.LOCATION', 'description' : 'Create mock location sources for testing. Malicious applications can use this to override the location and/or status returned by real location sources such as GPS or Network providers.', 'protectionLevel' : 'dangerous', 'label' : 'mock location sources for testing'}, 'android.permission.PERFORM_CDMA_PROVISIONING' : {'permissionGroup' : '', 'description' : 'Allows the application to start CDMA provisioning. Malicious applications may unnecessarily start CDMA provisioning', 'protectionLevel' : 'signatureOrSystem', 'label' : 'directly start CDMA phone setup'}, 'android.permission.WRITE_SMS' : {'permissionGroup' : 'android.permission-group.MESSAGES', 'description' : 'Allows application to write to SMS messages stored on your phone or SIM card. Malicious applications may delete your messages.', 'protectionLevel' : 'dangerous', 'label' : 'edit SMS or MMS'}, 'android.permission.ACCESS_ALL_DOWNLOADS' : {'permissionGroup' : '', 'description' : 'Allows the app to view and modify all downloads initiated by any app on the system.', 'protectionLevel' : 'signature', 'label' : 'Access all system downloads'}, 'android.permission.DELETE_PACKAGES' : {'permissionGroup' : '', 'description' : 'Allows an application to delete Android packages. Malicious applications can use this to delete important applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'delete applications'}, 'android.permission.COPY_PROTECTED_DATA' : {'permissionGroup' : '', 'description' : 'Allows to invoke default container service to copy content. Not for use by normal applications.', 'protectionLevel' : 'signature', 'label' : 'Allows to invoke default container service to copy content. Not for use by normal applications.'}, 'android.permission.ACCESS_CHECKIN_PROPERTIES' : {'permissionGroup' : '', 'description' : 'Allows read/write access to properties uploaded by the checkin service. Not for use by normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'access checkin properties'}, 'android.permission.MOUNT_UNMOUNT_FILESYSTEMS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows the application to mount and unmount filesystems for removable storage.', 'protectionLevel' : 'dangerous', 'label' : 'mount and unmount filesystems'}, 'android.permission.DOWNLOAD_WITHOUT_NOTIFICATION' : {'permissionGroup' : 'android.permission-group.NETWORK', 'description' : 'Allows the app to download files through the download manager without any notification being shown to the user.', 'protectionLevel' : 'normal', 'label' : 'download files without notification'}, 'android.permission.RETRIEVE_WINDOW_CONTENT' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows application to retrieve the content of the active window. Malicious applications may retrieve the entire window content and examine all its text except passwords.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'retrieve screen content'}, 'com.android.email.permission.READ_ATTACHMENT' : {'permissionGroup' : 'android.permission-group.MESSAGES', 'description' : 'Allows this application to read your email attachments.', 'protectionLevel' : 'dangerous', 'label' : 'Read email attachments'}, 'android.permission.SET_TIME' : {'permissionGroup' : '', 'description' : 'Allows an application to change the phone\'s clock time.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'set time'}, 'android.permission.BATTERY_STATS' : {'permissionGroup' : '', 'description' : 'Allows the modification of collected battery statistics. Not for use by normal applications.', 'protectionLevel' : 'normal', 'label' : 'modify battery statistics'}, 'android.app.cts.permission.TEST_GRANTED' : {'permissionGroup' : '', 'description' : 'Used for running CTS tests, for testing operations where we have the permission.', 'protectionLevel' : 'normal', 'label' : 'Test Granted'}, 'android.permission.DIAGNOSTIC' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to read and write to any resource owned by the diag group; for example, files in /dev. This could potentially affect system stability and security. This should be ONLY be used for hardware-specific diagnostics by the manufacturer or operator.', 'protectionLevel' : 'signature', 'label' : 'read/write to resources owned by diag'}, 'com.android.cts.permissionAllowedWithSignature' : {'permissionGroup' : '', 'description' : '', 'protectionLevel' : 'signature', 'label' : ''}, 'android.permission.CALL_PHONE' : {'permissionGroup' : 'android.permission-group.COST_MONEY', 'description' : 'Allows the application to call phone numbers without your intervention. Malicious applications may cause unexpected calls on your phone bill. Note that this does not allow the application to call emergency numbers.', 'protectionLevel' : 'dangerous', 'label' : 'directly call phone numbers'}, 'android.permission.MOUNT_FORMAT_FILESYSTEMS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows the application to format removable storage.', 'protectionLevel' : 'dangerous', 'label' : 'format external storage'}, 'android.permission.READ_PHONE_STATE' : {'permissionGroup' : 'android.permission-group.PHONE_CALLS', 'description' : 'Allows the application to access the phone features of the device. An application with this permission can determine the phone number and serial number of this phone, whether a call is active, the number that call is connected to and the like.', 'protectionLevel' : 'dangerous', 'label' : 'read phone state and identity'}, 'android.permission.ACCESS_COARSE_LOCATION' : {'permissionGroup' : 'android.permission-group.LOCATION', 'description' : 'Access coarse location sources such as the cellular network database to determine an approximate phone location, where available. Malicious applications can use this to determine approximately where you are.', 'protectionLevel' : 'dangerous', 'label' : 'coarse (network-based) location'}, 'android.permission.CLEAR_APP_USER_DATA' : {'permissionGroup' : '', 'description' : 'Allows an application to clear user data.', 'protectionLevel' : 'signature', 'label' : 'delete other applications\' data'}, 'android.permission.BROADCAST_SMS' : {'permissionGroup' : 'android.permission-group.MESSAGES', 'description' : 'Allows an application to broadcast a notification that an SMS message has been received. Malicious applications may use this to forge incoming SMS messages.', 'protectionLevel' : 'signature', 'label' : 'send SMS-received broadcast'}, 'android.permission.KILL_BACKGROUND_PROCESSES' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to kill background processes of other applications, even if memory isn\'t low.', 'protectionLevel' : 'normal', 'label' : 'kill background processes'}, 'android.permission.STOP_APP_SWITCHES' : {'permissionGroup' : '', 'description' : 'Prevents the user from switching to another application.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'prevent app switches'}, 'android.permission.ACCESS_WIFI_STATE' : {'permissionGroup' : 'android.permission-group.NETWORK', 'description' : 'Allows an application to view the information about the state of Wi-Fi.', 'protectionLevel' : 'normal', 'label' : 'view Wi-Fi state'}, 'android.permission.RECEIVE_MMS' : {'permissionGroup' : 'android.permission-group.MESSAGES', 'description' : 'Allows application to receive and process MMS messages. Malicious applications may monitor your messages or delete them without showing them to you.', 'protectionLevel' : 'dangerous', 'label' : 'receive MMS'}, 'android.permission.GLOBAL_SEARCH_CONTROL' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : '', 'protectionLevel' : 'signature', 'label' : ''}, 'android.permission.ACCESS_DOWNLOAD_MANAGER' : {'permissionGroup' : '', 'description' : 'Allows the app to access the download manager and to use it to download files. Malicious apps can use this to disrupt downloads and access private information.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'Access download manager.'}, 'android.permission.STATUS_BAR_SERVICE' : {'permissionGroup' : '', 'description' : 'Allows the application to be the status bar.', 'protectionLevel' : 'signature', 'label' : 'status bar'}, 'android.permission.DELETE_CACHE_FILES' : {'permissionGroup' : '', 'description' : 'Allows an application to delete cache files.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'delete other applications\' caches'}, 'android.permission.SET_POINTER_SPEED' : {'permissionGroup' : '', 'description' : 'Allows an application to change the mouse or trackpad pointer speed at any time. Should never be needed for normal applications.', 'protectionLevel' : 'signature', 'label' : 'change pointer speed'}, 'android.permission.RESTART_PACKAGES' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to kill background processes of other applications, even if memory isn\'t low.', 'protectionLevel' : 'normal', 'label' : 'kill background processes'}, 'android.permission.MODIFY_NETWORK_ACCOUNTING' : {'permissionGroup' : '', 'description' : 'Allows modification of how network usage is accounted against applications. Not for use by normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'modify network usage accounting'}, 'android.permission.GET_ACCOUNTS' : {'permissionGroup' : 'android.permission-group.ACCOUNTS', 'description' : 'Allows an application to get the list of accounts known by the phone.', 'protectionLevel' : 'normal', 'label' : 'discover known accounts'}, 'android.permission.SUBSCRIBED_FEEDS_READ' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to get details about the currently synced feeds.', 'protectionLevel' : 'normal', 'label' : 'read subscribed feeds'}, 'android.permission.CHANGE_NETWORK_STATE' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to change the state of network connectivity.', 'protectionLevel' : 'dangerous', 'label' : 'change network connectivity'}, 'android.permission.READ_SYNC_SETTINGS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to read the sync settings, such as whether sync is enabled for Contacts.', 'protectionLevel' : 'normal', 'label' : 'read sync settings'}, 'android.permission.DISABLE_KEYGUARD' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to disable the keylock and any associated password security. A legitimate example of this is the phone disabling the keylock when receiving an incoming phone call, then re-enabling the keylock when the call is finished.', 'protectionLevel' : 'dangerous', 'label' : 'disable keylock'}, 'android.permission.BIND_PACKAGE_VERIFIER' : {'permissionGroup' : '', 'description' : 'Allows the holder to make requests of package verifiers. Should never be needed for normal applications.', 'protectionLevel' : 'signature', 'label' : 'bind to a package verifier'}, 'com.android.launcher.permission.UNINSTALL_SHORTCUT' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an app to remove shortcuts without user intervention.', 'protectionLevel' : 'normal', 'label' : 'uninstall shortcuts'}, 'android.permission.USE_CREDENTIALS' : {'permissionGroup' : 'android.permission-group.ACCOUNTS', 'description' : 'Allows an application to request authentication tokens.', 'protectionLevel' : 'dangerous', 'label' : 'use the authentication credentials of an account'}, 'android.permission.SUBSCRIBED_FEEDS_WRITE' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to modify your currently synced feeds. This could allow a malicious application to change your synced feeds.', 'protectionLevel' : 'dangerous', 'label' : 'write subscribed feeds'}, 'android.permission.READ_USER_DICTIONARY' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows an application to read any private words, names and phrases that the user may have stored in the user dictionary.', 'protectionLevel' : 'dangerous', 'label' : 'read user defined dictionary'}, 'android.permission.WRITE_MEDIA_STORAGE' : {'permissionGroup' : 'android.permission-group.STORAGE', 'description' : 'Allows an application to modify the contents of the internal media storage.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'modify/delete internal media storage contents'}, 'android.permission.FACTORY_TEST' : {'permissionGroup' : '', 'description' : 'Run as a low-level manufacturer test, allowing complete access to the phone hardware. Only available when a phone is running in manufacturer test mode.', 'protectionLevel' : 'signature', 'label' : 'run in factory test mode'}, 'android.permission.CHANGE_COMPONENT_ENABLED_STATE' : {'permissionGroup' : '', 'description' : 'Allows an application to change whether a component of another application is enabled or not. Malicious applications can use this to disable important phone capabilities. Care must be used with this permission, as it is possible to get application components into an unusable, inconsistent, or unstable state.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'enable or disable application components'}, 'android.permission.RECEIVE_BOOT_COMPLETED' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to have itself started as soon as the system has finished booting. This can make it take longer to start the phone and allow the application to slow down the overall phone by always running.', 'protectionLevel' : 'normal', 'label' : 'automatically start at boot'}, 'com.android.voicemail.permission.ADD_VOICEMAIL' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows the application to add messages to your voicemail inbox.', 'protectionLevel' : 'dangerous', 'label' : 'add voicemail'}, 'android.permission.BACKUP' : {'permissionGroup' : '', 'description' : 'Allows the application to control the system\'s backup and restore mechanism. Not for use by normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'control system backup and restore'}, 'com.android.voicemail.permission.READ_WRITE_ALL_VOICEMAIL' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows the application to store and retrieve all voicemails that this device can access.', 'protectionLevel' : 'signature', 'label' : 'Access all voicemails'}, 'android.permission.BLUETOOTH_ADMIN' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to configure the local Bluetooth phone, and to discover and pair with remote devices.', 'protectionLevel' : 'dangerous', 'label' : 'bluetooth administration'}, 'android.permission.ACCESS_FINE_LOCATION' : {'permissionGroup' : 'android.permission-group.LOCATION', 'description' : 'Access fine location sources such as the Global Positioning System on the phone, where available. Malicious applications can use this to determine where you are, and may consume additional battery power.', 'protectionLevel' : 'dangerous', 'label' : 'fine (GPS) location'}, 'android.permission.ASEC_RENAME' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows the application to rename internal storage.', 'protectionLevel' : 'signature', 'label' : 'rename internal storage'}, 'android.permission.PERSISTENT_ACTIVITY' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to make parts of itself persistent, so the system can\'t use it for other applications.', 'protectionLevel' : 'dangerous', 'label' : 'make application always run'}, 'android.permission.REORDER_TASKS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to move tasks to the foreground and background. Malicious applications can force themselves to the front without your control.', 'protectionLevel' : 'dangerous', 'label' : 'reorder running applications'}, 'android.permission.BIND_TEXT_SERVICE' : {'permissionGroup' : '', 'description' : 'Allows the holder to bind to the top-level interface of a text service(e.g. SpellCheckerService). Should never be needed for normal applications.', 'protectionLevel' : 'signature', 'label' : 'bind to a text service'}, 'android.permission.RECEIVE_WAP_PUSH' : {'permissionGroup' : 'android.permission-group.MESSAGES', 'description' : 'Allows application to receive and process WAP messages. Malicious applications may monitor your messages or delete them without showing them to you.', 'protectionLevel' : 'dangerous', 'label' : 'receive WAP'}, 'android.permission.DEVICE_POWER' : {'permissionGroup' : '', 'description' : 'Allows the application to turn the phone on or off.', 'protectionLevel' : 'signature', 'label' : 'power phone on or off'}, 'android.permission.EXPAND_STATUS_BAR' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows application to expand or collapse the status bar.', 'protectionLevel' : 'normal', 'label' : 'expand/collapse status bar'}, 'android.permission.SET_WALLPAPER' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows the application to set the system wallpaper.', 'protectionLevel' : 'normal', 'label' : 'set wallpaper'}, 'android.permission.ASEC_DESTROY' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows the application to destroy internal storage.', 'protectionLevel' : 'signature', 'label' : 'destroy internal storage'}, 'android.permission.CONNECTIVITY_INTERNAL' : {'permissionGroup' : 'android.permission-group.NETWORK', 'description' : '', 'protectionLevel' : 'signatureOrSystem', 'label' : ''}, 'android.permission.WRITE_EXTERNAL_STORAGE' : {'permissionGroup' : 'android.permission-group.STORAGE', 'description' : 'Allows an application to write to the SD card.', 'protectionLevel' : 'dangerous', 'label' : 'modify/delete SD card contents'}, 'android.permission.GET_PACKAGE_SIZE' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to retrieve its code, data, and cache sizes', 'protectionLevel' : 'normal', 'label' : 'measure application storage space'}, 'com.android.frameworks.coretests.DANGEROUS' : {'permissionGroup' : 'android.permission-group.COST_MONEY', 'description' : '', 'protectionLevel' : 'dangerous', 'label' : ''}, 'android.permission.WRITE_SOCIAL_STREAM' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows the application to display social updates from your friends. Malicious apps can use this to pretend to be a friend and trick you into revealing passwords or other confidential information.', 'protectionLevel' : 'dangerous', 'label' : 'write to your social stream'}, 'android.permission.ASEC_MOUNT_UNMOUNT' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows the application to mount / unmount internal storage.', 'protectionLevel' : 'signature', 'label' : 'mount / unmount internal storage'}, 'android.permission.INSTALL_PACKAGES' : {'permissionGroup' : '', 'description' : 'Allows an application to install new or updated Android packages. Malicious applications can use this to add new applications with arbitrarily powerful permissions.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'directly install applications'}, 'android.permission.AUTHENTICATE_ACCOUNTS' : {'permissionGroup' : 'android.permission-group.ACCOUNTS', 'description' : 'Allows an application to use the account authenticator capabilities of the AccountManager, including creating accounts and getting and setting their passwords.', 'protectionLevel' : 'dangerous', 'label' : 'act as an account authenticator'}, 'android.permission.RECEIVE_EMERGENCY_BROADCAST' : {'permissionGroup' : 'android.permission-group.MESSAGES', 'description' : 'Allows application to receive and process emergency broadcast messages. This permission is only available to system applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'receive emergency broadcasts'}, 'com.android.launcher.permission.READ_SETTINGS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an app to read the settings and shortcuts in Home.', 'protectionLevel' : 'normal', 'label' : 'read Home settings and shortcuts'}, 'com.android.alarm.permission.SET_ALARM' : {'permissionGroup' : 'android.permission-group.PERSONAL_INFO', 'description' : 'Allows the application to set an alarm in an installed alarm clock application. Some alarm clock applications may not implement this feature.', 'protectionLevel' : 'normal', 'label' : 'set alarm in alarm clock'}, 'android.permission.INTERNAL_SYSTEM_WINDOW' : {'permissionGroup' : '', 'description' : 'Allows the creation of windows that are intended to be used by the internal system user interface. Not for use by normal applications.', 'protectionLevel' : 'signature', 'label' : 'display unauthorized windows'}, 'com.android.browser.permission.PRELOAD' : {'permissionGroup' : '', 'description' : '', 'protectionLevel' : 'signatureOrSystem', 'label' : 'Preload results'}, 'android.permission.GET_TASKS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows application to retrieve information about currently and recently running tasks. May allow malicious applications to discover private information about other applications.', 'protectionLevel' : 'dangerous', 'label' : 'retrieve running applications'}, 'android.permission.SET_ORIENTATION' : {'permissionGroup' : '', 'description' : 'Allows an application to change the rotation of the screen at any time. Should never be needed for normal applications.', 'protectionLevel' : 'signature', 'label' : 'change screen orientation'}, 'android.permission.SET_ACTIVITY_WATCHER' : {'permissionGroup' : '', 'description' : 'Allows an application to monitor and control how the system launches activities. Malicious applications may completely compromise the system. This permission is only needed for development, never for normal use.', 'protectionLevel' : 'signature', 'label' : 'monitor and control all application launching'}, 'com.android.frameworks.coretests.NORMAL' : {'permissionGroup' : 'android.permission-group.COST_MONEY', 'description' : '', 'protectionLevel' : 'normal', 'label' : ''}, 'android.permission.READ_SMS' : {'permissionGroup' : 'android.permission-group.MESSAGES', 'description' : 'Allows application to read SMS messages stored on your phone or SIM card. Malicious applications may read your confidential messages.', 'protectionLevel' : 'dangerous', 'label' : 'read SMS or MMS'}, 'android.permission.BROADCAST_STICKY' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to send sticky broadcasts, which remain after the broadcast ends. Malicious applications can make the phone slow or unstable by causing it to use too much memory.', 'protectionLevel' : 'normal', 'label' : 'send sticky broadcast'}, 'android.permission.GLOBAL_SEARCH' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : '', 'protectionLevel' : 'signatureOrSystem', 'label' : ''}, 'android.permission.SEND_SMS_NO_CONFIRMATION' : {'permissionGroup' : 'android.permission-group.COST_MONEY', 'description' : 'Allows application to send SMS messages. Malicious applications may cost you money by sending messages without your confirmation.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'send SMS messages with no confirmation'}, 'com.android.cts.permissionWithSignature' : {'permissionGroup' : '', 'description' : '', 'protectionLevel' : 'signature', 'label' : ''}, 'android.permission.REMOVE_TASKS' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to remove tasks and kill their applications. Malicious applications can disrupt the behavior of other applications.', 'protectionLevel' : 'signature', 'label' : 'stop running applications'}, 'android.permission.PACKAGE_USAGE_STATS' : {'permissionGroup' : '', 'description' : 'Allows the modification of collected component usage statistics. Not for use by normal applications.', 'protectionLevel' : 'signatureOrSystem', 'label' : 'update component usage statistics'}, 'android.permission.SET_ALWAYS_FINISH' : {'permissionGroup' : 'android.permission-group.DEVELOPMENT_TOOLS', 'description' : 'Allows an application to control whether activities are always finished as soon as they go to the background. Never needed for normal applications.', 'protectionLevel' : 'dangerous', 'label' : 'make all background applications close'}, 'android.permission.CLEAR_APP_CACHE' : {'permissionGroup' : 'android.permission-group.SYSTEM_TOOLS', 'description' : 'Allows an application to free phone storage by deleting files in application cache directory. Access is very restricted usually to system process.', 'protectionLevel' : 'dangerous', 'label' : 'delete all application cache data'}, 'android.permission.MANAGE_NETWORK_POLICY' : {'permissionGroup' : '', 'description' : 'Allows an application to manage network policies and define application-specific rules.', 'protectionLevel' : 'signature', 'label' : 'manage network policy'}, 'android.permission.FLASHLIGHT' : {'permissionGroup' : 'android.permission-group.HARDWARE_CONTROLS', 'description' : 'Allows the application to control the flashlight.', 'protectionLevel' : 'normal', 'label' : 'control flashlight'}, } AOSP_PERMISSION_GROUPS = { 'android.permission-group.NETWORK' : {'description' : 'Allow applications to access various network features.', 'label' : 'Network communication'}, 'android.permission-group.STORAGE' : {'description' : 'Access the SD card.', 'label' : 'Storage'}, 'android.permission-group.MESSAGES' : {'description' : 'Read and write your SMS, e-mail, and other messages.', 'label' : 'Your messages'}, 'android.permission-group.PERSONAL_INFO' : {'description' : 'Direct access to your contacts and calendar stored on the phone.', 'label' : 'Your personal information'}, 'android.permission-group.DEVELOPMENT_TOOLS' : {'description' : 'Features only needed for application developers.', 'label' : 'Development tools'}, 'android.permission-group.COST_MONEY' : {'description' : '', 'label' : ''}, 'android.permission-group.ACCOUNTS' : {'description' : 'Access the available accounts.', 'label' : 'Your accounts'}, 'android.permission-group.LOCATION' : {'description' : 'Monitor your physical location', 'label' : 'Your location'}, 'android.permission-group.HARDWARE_CONTROLS' : {'description' : 'Direct access to hardware on the handset.', 'label' : 'Hardware controls'}, 'android.permission-group.SYSTEM_TOOLS' : {'description' : 'Lower-level access and control of the system.', 'label' : 'System tools'}, 'android.permission-group.PHONE_CALLS' : {'description' : 'Monitor, record, and process phone calls.', 'label' : 'Phone calls'}, } #################################################
ganshun666/micropython
refs/heads/redbear-duo
tests/basics/generator_closure.py
116
# a generator that closes over outer variables def f(): x = 1 # closed over by g def g(): yield x yield x + 1 return g() for i in f(): print(i) # a generator that has its variables closed over def f(): x = 1 # closed over by g def g(): return x + 1 yield g() x = 2 yield g() for i in f(): print(i) # using comprehensions, the inner generator closes over y generator_of_generators = (((x, y) for x in range(2)) for y in range(3)) for i in generator_of_generators: for j in i: print(j) # test that printing of closed-over generators doesn't lead to segfaults def genc(): foo = 1 repr(lambda: (yield foo)) genc()
Sticklyman1936/workload-automation
refs/heads/master
wlauto/devices/android/note3/__init__.py
9
# Copyright 2013-2015 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import time from wlauto import AndroidDevice, Parameter from wlauto.exceptions import TimeoutError from wlauto.utils.android import adb_shell class Note3Device(AndroidDevice): name = 'Note3' description = """ Adapter for Galaxy Note 3. To be able to use Note3 in WA, the following must be true: - USB Debugging Mode is enabled. - Generate USB debugging authorisation for the host machine """ parameters = [ Parameter('core_names', default=['A15', 'A15', 'A15', 'A15'], override=True), Parameter('core_clusters', default=[0, 0, 0, 0], override=True), Parameter('working_directory', default='/storage/sdcard0/wa-working', override=True), ] def __init__(self, **kwargs): super(Note3Device, self).__init__(**kwargs) self._just_rebooted = False def initialize(self, context): self.execute('svc power stayon true', check_exit_code=False) def reset(self): super(Note3Device, self).reset() self._just_rebooted = True def hard_reset(self): super(Note3Device, self).hard_reset() self._just_rebooted = True def connect(self): # NOQA pylint: disable=R0912 super(Note3Device, self).connect() if self._just_rebooted: self.logger.debug('Waiting for boot to complete...') # On the Note 3, adb connection gets reset some time after booting. # This causes errors during execution. To prevent this, open a shell # session and wait for it to be killed. Once its killed, give adb # enough time to restart, and then the device should be ready. try: adb_shell(self.adb_name, '', timeout=20) # pylint: disable=no-member time.sleep(5) # give adb time to re-initialize except TimeoutError: pass # timed out waiting for the session to be killed -- assume not going to be. self.logger.debug('Boot completed.') self._just_rebooted = False # Swipe upwards to unlock the screen. time.sleep(self.long_delay) self.execute('input touchscreen swipe 540 1600 560 800 ')
muntasirsyed/intellij-community
refs/heads/master
python/testData/copyPaste/Indent8982.src.py
83
<selection>a = 1 b = 1 </selection>c = 1 d = 1
pbrunet/pythran
refs/heads/master
pythran/tests/openmp.legacy/omp_single.py
4
def omp_single(): nr_threads_in_single = 0 result = 0 nr_iterations = 0 LOOPCOUNT = 1000 if 'omp parallel': for i in xrange(LOOPCOUNT): if 'omp single': 'omp flush' nr_threads_in_single += 1 'omp flush' nr_iterations += 1 nr_threads_in_single -= 1 result += nr_threads_in_single return result == 0 and nr_iterations == LOOPCOUNT
Xeralux/tensorflow
refs/heads/master
tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler_test.py
15
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for checking stats accumulator related ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler from tensorflow.contrib.boosted_trees.proto import learner_pb2 from tensorflow.contrib.boosted_trees.proto import split_info_pb2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import resources from tensorflow.python.platform import googletest def get_empty_tensors(gradient_shape, hessian_shape): empty_hess_shape = [1] + hessian_shape.as_list() empty_grad_shape = [1] + gradient_shape.as_list() empty_gradients = constant_op.constant( [], dtype=dtypes.float32, shape=empty_grad_shape) empty_hessians = constant_op.constant( [], dtype=dtypes.float32, shape=empty_hess_shape) return empty_gradients, empty_hessians class DenseSplitHandlerTest(test_util.TensorFlowTestCase): def testGenerateFeatureSplitCandidates(self): with self.test_session() as sess: # The data looks like the following: # Example | Gradients | Partition | Dense Quantile | # i0 | (0.2, 0.12) | 0 | 1 | # i1 | (-0.5, 0.07) | 0 | 1 | # i2 | (1.2, 0.2) | 0 | 0 | # i3 | (4.0, 0.13) | 1 | 1 | dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52]) gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0]) hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13]) partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) class_id = -1 gradient_shape = tensor_shape.scalar() hessian_shape = tensor_shape.scalar() split_handler = ordinal_split_handler.DenseSplitHandler( l1_regularization=0.1, l2_regularization=1, tree_complexity_regularization=0, min_node_weight=0, epsilon=0.001, num_quantiles=10, feature_column_group_id=0, dense_float_column=dense_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) # During the first iteration, inequality split handlers are not going to # have any splits. Make sure that we return not_ready in that case. self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) self.assertAllEqual([0, 1], partitions) # Check the split on partition 0. # -(1.2 - 0.1) / (0.2 + 1) expected_left_weight = -0.91666 # expected_left_weight * -(1.2 - 0.1) expected_left_gain = 1.0083333333333331 # (-0.5 + 0.2 + 0.1) / (0.19 + 1) expected_right_weight = 0.1680672 # expected_right_weight * -(-0.5 + 0.2 + 0.1)) expected_right_gain = 0.033613445378151252 # (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1) expected_bias_gain = 0.46043165467625885 split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split self.assertAllClose( expected_left_gain + expected_right_gain - expected_bias_gain, gains[0], 0.00001) self.assertAllClose([expected_left_weight], left_child.value, 0.00001) self.assertAllClose([expected_right_weight], right_child.value, 0.00001) self.assertEqual(0, split_node.feature_column) self.assertAllClose(0.3, split_node.threshold, 0.00001) # Check the split on partition 1. # (-4 + 0.1) / (0.13 + 1) expected_left_weight = -3.4513274336283186 # (-4 + 0.1) ** 2 / (0.13 + 1) expected_left_gain = 13.460176991150442 expected_right_weight = 0 expected_right_gain = 0 # (-4 + 0.1) ** 2 / (0.13 + 1) expected_bias_gain = 13.460176991150442 # Verify candidate for partition 1, there's only one active bucket here # so zero gain is expected. split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split self.assertAllClose(0.0, gains[1], 0.00001) self.assertAllClose([expected_left_weight], left_child.value, 0.00001) self.assertAllClose([expected_right_weight], right_child.value, 0.00001) self.assertEqual(0, split_node.feature_column) self.assertAllClose(0.52, split_node.threshold, 0.00001) def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self): with self.test_session() as sess: dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52]) # Batch size is 4, 2 gradients per each instance. gradients = array_ops.constant( [[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2]) # 2x2 matrix for each instance hessian_0 = [[0.12, 0.02], [0.3, 0.11]] hessian_1 = [[0.07, -0.2], [-0.5, 0.2]] hessian_2 = [[0.2, -0.23], [-0.8, 0.9]] hessian_3 = [[0.13, -0.3], [-1.5, 2.2]] hessians = array_ops.constant( [hessian_0, hessian_1, hessian_2, hessian_3]) partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) class_id = -1 gradient_shape = tensor_shape.TensorShape([2]) hessian_shape = tensor_shape.TensorShape([2, 2]) split_handler = ordinal_split_handler.DenseSplitHandler( l1_regularization=0, l2_regularization=1, tree_complexity_regularization=0, min_node_weight=0, epsilon=0.001, num_quantiles=3, feature_column_group_id=0, dense_float_column=dense_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) # During the first iteration, inequality split handlers are not going to # have any splits. Make sure that we return not_ready in that case. self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split # Each leaf has 2 element vector. self.assertEqual(2, len(left_child.value)) self.assertEqual(2, len(right_child.value)) self.assertEqual(0, split_node.feature_column) self.assertAllClose(0.3, split_node.threshold, 1e-6) def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self): with self.test_session() as sess: dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52]) # Batch size is 4, 2 gradients per each instance. gradients = array_ops.constant( [[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2]) # Each hessian is a diagonal of a full hessian matrix. hessian_0 = [0.12, 0.11] hessian_1 = [0.07, 0.2] hessian_2 = [0.2, 0.9] hessian_3 = [0.13, 2.2] hessians = array_ops.constant( [hessian_0, hessian_1, hessian_2, hessian_3]) partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) class_id = -1 gradient_shape = tensor_shape.TensorShape([2]) hessian_shape = tensor_shape.TensorShape([2]) split_handler = ordinal_split_handler.DenseSplitHandler( l1_regularization=0, l2_regularization=1, tree_complexity_regularization=0, min_node_weight=0, epsilon=0.001, num_quantiles=3, feature_column_group_id=0, dense_float_column=dense_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) # During the first iteration, inequality split handlers are not going to # have any splits. Make sure that we return not_ready in that case. self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split # Each leaf has 2 element vector. self.assertEqual(2, len(left_child.value)) self.assertEqual(2, len(right_child.value)) self.assertEqual(0, split_node.feature_column) self.assertAllClose(0.3, split_node.threshold, 1e-6) def testGenerateFeatureSplitCandidatesInactive(self): with self.test_session() as sess: # The data looks like the following: # Example | Gradients | Partition | Dense Quantile | # i0 | (0.2, 0.12) | 0 | 1 | # i1 | (-0.5, 0.07) | 0 | 1 | # i2 | (1.2, 0.2) | 0 | 0 | # i3 | (4.0, 0.13) | 1 | 1 | dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52]) gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0]) hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13]) partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) gradient_shape = tensor_shape.scalar() hessian_shape = tensor_shape.scalar() class_id = -1 split_handler = ordinal_split_handler.DenseSplitHandler( l1_regularization=0.1, l2_regularization=1, tree_complexity_regularization=0, min_node_weight=0, epsilon=0.001, num_quantiles=10, feature_column_group_id=0, dense_float_column=dense_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, False])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([False, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) # During the first iteration, inequality split handlers are not going to # have any splits. Make sure that we return not_ready in that case. self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) # The handler was inactive, so it shouldn't return any splits. self.assertEqual(len(partitions), 0) self.assertEqual(len(gains), 0) self.assertEqual(len(splits), 0) def testGenerateFeatureSplitCandidatesWithTreeComplexity(self): with self.test_session() as sess: # The data looks like the following: # Example | Gradients | Partition | Dense Quantile | # i0 | (0.2, 0.12) | 0 | 1 | # i1 | (-0.5, 0.07) | 0 | 1 | # i2 | (1.2, 0.2) | 0 | 0 | # i3 | (4.0, 0.13) | 1 | 1 | dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52]) gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0]) hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13]) partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) gradient_shape = tensor_shape.scalar() hessian_shape = tensor_shape.scalar() class_id = -1 split_handler = ordinal_split_handler.DenseSplitHandler( l1_regularization=0.1, l2_regularization=1, tree_complexity_regularization=0.5, min_node_weight=0, epsilon=0.001, num_quantiles=10, feature_column_group_id=0, dense_float_column=dense_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) # During the first iteration, inequality split handlers are not going to # have any splits. Make sure that we return not_ready in that case. self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) self.assertAllEqual([0, 1], partitions) # Check the split on partition 0. # -(1.2 - 0.1) / (0.2 + 1) expected_left_weight = -0.91666 # expected_left_weight * -(1.2 - 0.1) expected_left_gain = 1.0083333333333331 # (-0.5 + 0.2 + 0.1) / (0.19 + 1) expected_right_weight = 0.1680672 # expected_right_weight * -(-0.5 + 0.2 + 0.1)) expected_right_gain = 0.033613445378151252 # (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1) expected_bias_gain = 0.46043165467625885 split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split # Make sure the gain is subtracted by the tree complexity regularization. self.assertAllClose( expected_left_gain + expected_right_gain - expected_bias_gain - 0.5, gains[0], 0.00001) self.assertAllClose([expected_left_weight], left_child.value, 0.00001) self.assertAllClose([expected_right_weight], right_child.value, 0.00001) self.assertEqual(0, split_node.feature_column) self.assertAllClose(0.3, split_node.threshold, 0.00001) # Check the split on partition 1. # (-4 + 0.1) / (0.13 + 1) expected_left_weight = -3.4513274336283186 # (-4 + 0.1) ** 2 / (0.13 + 1) expected_left_gain = 13.460176991150442 expected_right_weight = 0 expected_right_gain = 0 # (-4 + 0.1) ** 2 / (0.13 + 1) expected_bias_gain = 13.460176991150442 # Verify candidate for partition 1, there's only one active bucket here # so -0.5 gain is expected (because of tree complexity. split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split self.assertAllClose(-0.5, gains[1], 0.00001) self.assertAllClose([expected_left_weight], left_child.value, 0.00001) self.assertAllClose([expected_right_weight], right_child.value, 0.00001) self.assertEqual(0, split_node.feature_column) self.assertAllClose(0.52, split_node.threshold, 0.00001) def testGenerateFeatureSplitCandidatesWithMinNodeWeight(self): with self.test_session() as sess: # The data looks like the following: # Example | Gradients | Partition | Dense Quantile | # i0 | (0.2, 0.12) | 0 | 1 | # i1 | (-0.5, 0.07) | 0 | 1 | # i2 | (1.2, 0.2) | 0 | 0 | # i3 | (4.0, 2.0) | 1 | 1 | dense_column = array_ops.constant([0.52, 0.52, 0.3, 0.52]) gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0]) hessians = array_ops.constant([0.12, 0.07, 0.2, 2]) partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) gradient_shape = tensor_shape.scalar() hessian_shape = tensor_shape.scalar() class_id = -1 split_handler = ordinal_split_handler.DenseSplitHandler( l1_regularization=0.1, l2_regularization=1, tree_complexity_regularization=0.5, min_node_weight=1.5, epsilon=0.001, num_quantiles=10, feature_column_group_id=0, dense_float_column=dense_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) # During the first iteration, inequality split handlers are not going to # have any splits. Make sure that we return not_ready in that case. self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) self.assertAllEqual([0, 1], partitions) # Check the gain on partition 0 to be -0.5. split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split # Make sure the gain is subtracted by the tree complexity regularization. self.assertAllClose(-0.5, gains[0], 0.00001) self.assertEqual(0, split_node.feature_column) # Check the split on partition 1. # (-4 + 0.1) / (2 + 1) expected_left_weight = -1.3 expected_right_weight = 0 # Verify candidate for partition 1, there's only one active bucket here # so -0.5 gain is expected (because of tree complexity. split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.dense_float_binary_split self.assertAllClose(-0.5, gains[1], 0.00001) self.assertAllClose([expected_left_weight], left_child.value, 0.00001) self.assertAllClose([expected_right_weight], right_child.value, 0.00001) self.assertEqual(0, split_node.feature_column) self.assertAllClose(0.52, split_node.threshold, 0.00001) class SparseSplitHandlerTest(test_util.TensorFlowTestCase): def testGenerateFeatureSplitCandidates(self): with self.test_session() as sess: # The data looks like the following: # Example | Gradients | Partition | Sparse Quantile | # i0 | (0.2, 0.12) | 0 | 1 | # i1 | (-0.5, 0.07) | 0 | N/A | # i2 | (1.2, 0.2) | 0 | 0 | # i3 | (4.0, 0.13) | 1 | 1 | gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0]) hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13]) example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64) values = array_ops.constant([0.52, 0.3, 0.52]) sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1]) gradient_shape = tensor_shape.scalar() hessian_shape = tensor_shape.scalar() class_id = -1 split_handler = ordinal_split_handler.SparseSplitHandler( l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, epsilon=0.01, num_quantiles=2, feature_column_group_id=0, sparse_float_column=sparse_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, example_partitions, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, example_partitions, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) # During the first iteration, inequality split handlers are not going to # have any splits. Make sure that we return not_ready in that case. self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) self.assertAllEqual([0, 1], partitions) # Check the split on partition 0. # -(0.2 + 1.2) / (0.12 + 0.2 + 2) expected_left_weight = -0.603448275862069 # (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2) expected_left_gain = 0.8448275862068965 # 0.5 / (0.07 + 2) expected_right_weight = 0.24154589371980678 # 0.5 ** 2 / (0.07 + 2) expected_right_gain = 0.12077294685990339 # (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2) expected_bias_gain = 0.3389121338912133 split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_right self.assertAllClose( expected_left_gain + expected_right_gain - expected_bias_gain, gains[0]) self.assertAllClose([expected_left_weight], left_child.value) self.assertAllClose([expected_right_weight], right_child.value) self.assertEqual(0, split_node.split.feature_column) self.assertAllClose(0.52, split_node.split.threshold) # Check the split on partition 1. expected_left_weight = -1.8779342723004695 expected_right_weight = 0 # Verify candidate for partition 1, there's only one active bucket here # so zero gain is expected. split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_left self.assertAllClose(0.0, gains[1]) self.assertAllClose([expected_left_weight], left_child.value) self.assertAllClose([expected_right_weight], right_child.value) self.assertEqual(0, split_node.split.feature_column) self.assertAllClose(0.52, split_node.split.threshold) def testGenerateFeatureSplitCandidatesMulticlassFullHessian(self): with self.test_session() as sess: # Batch is 4, 2 classes gradients = array_ops.constant( [[0.2, 1.4], [-0.5, 0.1], [1.2, 3], [4.0, -3]]) # 2x2 matrix for each instance hessian_0 = [[0.12, 0.02], [0.3, 0.11]] hessian_1 = [[0.07, -0.2], [-0.5, 0.2]] hessian_2 = [[0.2, -0.23], [-0.8, 0.9]] hessian_3 = [[0.13, -0.3], [-1.5, 2.2]] hessians = array_ops.constant( [hessian_0, hessian_1, hessian_2, hessian_3]) example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64) values = array_ops.constant([0.52, 0.3, 0.52]) sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1]) gradient_shape = tensor_shape.TensorShape([2]) hessian_shape = tensor_shape.TensorShape([2, 2]) class_id = -1 split_handler = ordinal_split_handler.SparseSplitHandler( l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, epsilon=0.01, num_quantiles=2, feature_column_group_id=0, sparse_float_column=sparse_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, example_partitions, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, example_partitions, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_right # Each leaf has 2 element vector. self.assertEqual(2, len(left_child.value)) self.assertEqual(2, len(right_child.value)) self.assertEqual(0, split_node.split.feature_column) self.assertAllClose(0.52, split_node.split.threshold) split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_left self.assertEqual(2, len(left_child.value)) self.assertEqual(0, split_node.split.feature_column) self.assertAllClose(0.52, split_node.split.threshold) def testGenerateFeatureSplitCandidatesMulticlassDiagonalHessian(self): with self.test_session() as sess: # Batch is 4, 2 classes gradients = array_ops.constant( [[0.2, 1.4], [-0.5, 0.1], [1.2, 3], [4.0, -3]]) # Each hessian is a diagonal from a full hessian matrix. hessian_0 = [0.12, 0.11] hessian_1 = [0.07, 0.2] hessian_2 = [0.2, 0.9] hessian_3 = [0.13, 2.2] hessians = array_ops.constant( [hessian_0, hessian_1, hessian_2, hessian_3]) example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64) values = array_ops.constant([0.52, 0.3, 0.52]) sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1]) gradient_shape = tensor_shape.TensorShape([2]) hessian_shape = tensor_shape.TensorShape([2]) class_id = -1 split_handler = ordinal_split_handler.SparseSplitHandler( l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, epsilon=0.01, num_quantiles=2, feature_column_group_id=0, sparse_float_column=sparse_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.DIAGONAL_HESSIAN) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, example_partitions, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, example_partitions, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_right # Each leaf has 2 element vector. self.assertEqual(2, len(left_child.value)) self.assertEqual(2, len(right_child.value)) self.assertEqual(0, split_node.split.feature_column) self.assertAllClose(0.52, split_node.split.threshold) split_info.ParseFromString(splits[1]) left_child = split_info.left_child.vector right_child = split_info.right_child.vector split_node = split_info.split_node.sparse_float_binary_split_default_left self.assertEqual(2, len(left_child.value)) self.assertEqual(0, split_node.split.feature_column) self.assertAllClose(0.52, split_node.split.threshold) def testGenerateFeatureSplitCandidatesInactive(self): with self.test_session() as sess: # The data looks like the following: # Example | Gradients | Partition | Sparse Quantile | # i0 | (0.2, 0.12) | 0 | 1 | # i1 | (-0.5, 0.07) | 0 | N/A | # i2 | (1.2, 0.2) | 0 | 0 | # i3 | (4.0, 0.13) | 1 | 1 | gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0]) hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13]) example_partitions = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) indices = array_ops.constant([[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64) values = array_ops.constant([0.52, 0.3, 0.52]) sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1]) gradient_shape = tensor_shape.scalar() hessian_shape = tensor_shape.scalar() class_id = -1 split_handler = ordinal_split_handler.SparseSplitHandler( l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, epsilon=0.01, num_quantiles=2, feature_column_group_id=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, sparse_float_column=sparse_column, init_stamp_token=0, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, example_partitions, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, False])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, example_partitions, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([False, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) # During the first iteration, inequality split handlers are not going to # have any splits. Make sure that we return not_ready in that case. self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) # The handler was inactive so it shouldn't any splits. self.assertEqual(len(partitions), 0) self.assertEqual(len(gains), 0) self.assertEqual(len(splits), 0) def testEmpty(self): with self.test_session() as sess: indices = array_ops.constant([], dtype=dtypes.int64, shape=[0, 2]) # No values in this feature column in this mini-batch. values = array_ops.constant([], dtype=dtypes.float32) sparse_column = sparse_tensor.SparseTensor(indices, values, [4, 1]) gradient_shape = tensor_shape.scalar() hessian_shape = tensor_shape.scalar() class_id = -1 split_handler = ordinal_split_handler.SparseSplitHandler( l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, epsilon=0.01, num_quantiles=2, feature_column_group_id=0, sparse_float_column=sparse_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS) resources.initialize_resources(resources.shared_resources()).run() gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0]) hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13]) partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32) empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([4, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, partition_ids, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) self.assertEqual(len(partitions), 0) self.assertEqual(len(gains), 0) self.assertEqual(len(splits), 0) def testDegenerativeCase(self): with self.test_session() as sess: # One data example only, one leaf and thus one quantile bucket.The same # situation is when all examples have the same values. This case was # causing before a failure. gradients = array_ops.constant([0.2]) hessians = array_ops.constant([0.12]) example_partitions = array_ops.constant([1], dtype=dtypes.int32) indices = array_ops.constant([[0, 0]], dtype=dtypes.int64) values = array_ops.constant([0.58]) sparse_column = sparse_tensor.SparseTensor(indices, values, [1, 1]) gradient_shape = tensor_shape.scalar() hessian_shape = tensor_shape.scalar() class_id = -1 split_handler = ordinal_split_handler.SparseSplitHandler( l1_regularization=0, l2_regularization=2, tree_complexity_regularization=0, min_node_weight=0, epsilon=0.01, num_quantiles=2, feature_column_group_id=0, sparse_float_column=sparse_column, init_stamp_token=0, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS) resources.initialize_resources(resources.shared_resources()).run() empty_gradients, empty_hessians = get_empty_tensors( gradient_shape, hessian_shape) example_weights = array_ops.ones([1, 1], dtypes.float32) update_1 = split_handler.update_stats_sync( 0, example_partitions, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_1]): are_splits_ready = split_handler.make_splits(0, 1, class_id)[0] with ops.control_dependencies([are_splits_ready]): update_2 = split_handler.update_stats_sync( 1, example_partitions, gradients, hessians, empty_gradients, empty_hessians, example_weights, is_active=array_ops.constant([True, True])) with ops.control_dependencies([update_2]): are_splits_ready2, partitions, gains, splits = ( split_handler.make_splits(1, 2, class_id)) are_splits_ready, are_splits_ready2, partitions, gains, splits = ( sess.run([ are_splits_ready, are_splits_ready2, partitions, gains, splits ])) # During the first iteration, inequality split handlers are not going to # have any splits. Make sure that we return not_ready in that case. self.assertFalse(are_splits_ready) self.assertTrue(are_splits_ready2) self.assertAllEqual([1], partitions) self.assertAllEqual([0.0], gains) split_info = split_info_pb2.SplitInfo() split_info.ParseFromString(splits[0]) split_node = split_info.split_node.sparse_float_binary_split_default_left self.assertEqual(0, split_node.split.feature_column) self.assertAllClose(0.58, split_node.split.threshold) if __name__ == "__main__": googletest.main()
jackytu/newbrandx
refs/heads/rankx
sites/newbrandx/tests/test_checkout_forms.py
34
from django.test import TestCase from apps.checkout import forms from oscar.apps.address import models from oscar.apps.order import models as order_models class TestBillingAddressForm(TestCase): def setUp(self): models.Country.objects.create( iso_3166_1_a2='GB', name="Great Britain") self.shipping_address = order_models.ShippingAddress() def test_selecting_same_as_shipping_is_valid_with_no_billing_address_data(self): data = { 'same_as_shipping': 'same', 'first_name': '', 'last_name': '', 'line1': '', 'line2': '', 'line3': '', 'line4': '', 'postcode': '', 'state': '', 'country': 'GB' } form = forms.BillingAddressForm( shipping_address=self.shipping_address, data=data) self.assertTrue( form.is_valid(), "Form invalid due to %r" % form.errors) def test_selecting_same_as_shipping_is_valid(self): data = { 'same_as_shipping': 'same', 'first_name': 'test', 'last_name': 'test', 'line1': 'test', 'line2': 'test', 'line3': 'test', 'line4': 'test', 'postcode': 'test', 'state': '', 'country': 'GB' } form = forms.BillingAddressForm( shipping_address=self.shipping_address, data=data) self.assertTrue( form.is_valid(), "Form invalid due to %r" % form.errors) def test_selecting_manual_validate_address(self): data = { 'same_as_shipping': 'new', 'first_name': 'test', 'last_name': 'test', 'line1': 'test', 'line2': 'test', 'line3': 'test', 'line4': 'test', 'postcode': 'test', 'state': '', 'country': 'GB' } form = forms.BillingAddressForm( shipping_address=self.shipping_address, data=data) self.assertFalse(form.is_valid())
berinhard/newfies-dialer
refs/heads/master
newfies/frontend/models.py
28
# # Newfies-Dialer License # http://www.newfies-dialer.org # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Copyright (C) 2011-2013 Star2Billing S.L. # # The Initial Developer of the Original Code is # Arezqui Belaid <info@star2billing.com> #
TathagataChakraborti/resource-conflicts
refs/heads/master
PLANROB-2015/seq-sat-lama/Python-2.5.2/Lib/test/test_gdbm.py
17
#! /usr/bin/env python """Test script for the gdbm module Roger E. Masse """ import gdbm from gdbm import error from test.test_support import verbose, verify, TestFailed, TESTFN filename = TESTFN g = gdbm.open(filename, 'c') verify(g.keys() == []) g['a'] = 'b' g['12345678910'] = '019237410982340912840198242' a = g.keys() if verbose: print 'Test gdbm file keys: ', a g.has_key('a') g.close() try: g['a'] except error: pass else: raise TestFailed, "expected gdbm.error accessing closed database" g = gdbm.open(filename, 'r') g.close() g = gdbm.open(filename, 'w') g.close() g = gdbm.open(filename, 'n') g.close() try: g = gdbm.open(filename, 'rx') g.close() except error: pass else: raise TestFailed, "expected gdbm.error when passing invalid open flags" try: import os os.unlink(filename) except: pass
paulrouget/servo
refs/heads/master
etc/ci/performance/submit_to_s3.py
20
#!/usr/bin/env python3 # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. import argparse import boto3 def main(): parser = argparse.ArgumentParser( description=("Submit Servo performance data to S3. " "Remember to set your S3 credentials " "https://github.com/boto/boto3")) parser.add_argument("perf_file", help="the output CSV file from runner") parser.add_argument("perf_key", help="the S3 key to upload to") args = parser.parse_args() s3 = boto3.client('s3') BUCKET = 'servo-perf' s3.upload_file(args.perf_file, BUCKET, args.perf_key) print("Done!") if __name__ == "__main__": main()
LarsMichelsen/pmatic
refs/heads/master
ccu_pkg/python/lib/python2.7/rfc822.py
36
"""RFC 2822 message manipulation. Note: This is only a very rough sketch of a full RFC-822 parser; in particular the tokenizing of addresses does not adhere to all the quoting rules. Note: RFC 2822 is a long awaited update to RFC 822. This module should conform to RFC 2822, and is thus mis-named (it's not worth renaming it). Some effort at RFC 2822 updates have been made, but a thorough audit has not been performed. Consider any RFC 2822 non-conformance to be a bug. RFC 2822: http://www.faqs.org/rfcs/rfc2822.html RFC 822 : http://www.faqs.org/rfcs/rfc822.html (obsolete) Directions for use: To create a Message object: first open a file, e.g.: fp = open(file, 'r') You can use any other legal way of getting an open file object, e.g. use sys.stdin or call os.popen(). Then pass the open file object to the Message() constructor: m = Message(fp) This class can work with any input object that supports a readline method. If the input object has seek and tell capability, the rewindbody method will work; also illegal lines will be pushed back onto the input stream. If the input object lacks seek but has an `unread' method that can push back a line of input, Message will use that to push back illegal lines. Thus this class can be used to parse messages coming from a buffered stream. The optional `seekable' argument is provided as a workaround for certain stdio libraries in which tell() discards buffered data before discovering that the lseek() system call doesn't work. For maximum portability, you should set the seekable argument to zero to prevent that initial \code{tell} when passing in an unseekable object such as a file object created from a socket object. If it is 1 on entry -- which it is by default -- the tell() method of the open file object is called once; if this raises an exception, seekable is reset to 0. For other nonzero values of seekable, this test is not made. To get the text of a particular header there are several methods: str = m.getheader(name) str = m.getrawheader(name) where name is the name of the header, e.g. 'Subject'. The difference is that getheader() strips the leading and trailing whitespace, while getrawheader() doesn't. Both functions retain embedded whitespace (including newlines) exactly as they are specified in the header, and leave the case of the text unchanged. For addresses and address lists there are functions realname, mailaddress = m.getaddr(name) list = m.getaddrlist(name) where the latter returns a list of (realname, mailaddr) tuples. There is also a method time = m.getdate(name) which parses a Date-like field and returns a time-compatible tuple, i.e. a tuple such as returned by time.localtime() or accepted by time.mktime(). See the class definition for lower level access methods. There are also some utility functions here. """ # Cleanup and extensions by Eric S. Raymond <esr@thyrsus.com> import time from warnings import warnpy3k warnpy3k("in 3.x, rfc822 has been removed in favor of the email package", stacklevel=2) __all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"] _blanklines = ('\r\n', '\n') # Optimization for islast() class Message: """Represents a single RFC 2822-compliant message.""" def __init__(self, fp, seekable = 1): """Initialize the class instance and read the headers.""" if seekable == 1: # Exercise tell() to make sure it works # (and then assume seek() works, too) try: fp.tell() except (AttributeError, IOError): seekable = 0 self.fp = fp self.seekable = seekable self.startofheaders = None self.startofbody = None # if self.seekable: try: self.startofheaders = self.fp.tell() except IOError: self.seekable = 0 # self.readheaders() # if self.seekable: try: self.startofbody = self.fp.tell() except IOError: self.seekable = 0 def rewindbody(self): """Rewind the file to the start of the body (if seekable).""" if not self.seekable: raise IOError, "unseekable file" self.fp.seek(self.startofbody) def readheaders(self): """Read header lines. Read header lines up to the entirely blank line that terminates them. The (normally blank) line that ends the headers is skipped, but not included in the returned list. If a non-header line ends the headers, (which is an error), an attempt is made to backspace over it; it is never included in the returned list. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a completely uninterpreted list of lines contained in the header (so printing them will reproduce the header exactly as it appears in the file). """ self.dict = {} self.unixfrom = '' self.headers = lst = [] self.status = '' headerseen = "" firstline = 1 startofline = unread = tell = None if hasattr(self.fp, 'unread'): unread = self.fp.unread elif self.seekable: tell = self.fp.tell while 1: if tell: try: startofline = tell() except IOError: startofline = tell = None self.seekable = 0 line = self.fp.readline() if not line: self.status = 'EOF in headers' break # Skip unix From name time lines if firstline and line.startswith('From '): self.unixfrom = self.unixfrom + line continue firstline = 0 if headerseen and line[0] in ' \t': # It's a continuation line. lst.append(line) x = (self.dict[headerseen] + "\n " + line.strip()) self.dict[headerseen] = x.strip() continue elif self.iscomment(line): # It's a comment. Ignore it. continue elif self.islast(line): # Note! No pushback here! The delimiter line gets eaten. break headerseen = self.isheader(line) if headerseen: # It's a legal header line, save it. lst.append(line) self.dict[headerseen] = line[len(headerseen)+1:].strip() continue else: # It's not a header line; throw it back and stop here. if not self.dict: self.status = 'No headers' else: self.status = 'Non-header line where header expected' # Try to undo the read. if unread: unread(line) elif tell: self.fp.seek(startofline) else: self.status = self.status + '; bad seek' break def isheader(self, line): """Determine whether a given line is a legal header. This method should return the header name, suitably canonicalized. You may override this method in order to use Message parsing on tagged data in RFC 2822-like formats with special header formats. """ i = line.find(':') if i > 0: return line[:i].lower() return None def islast(self, line): """Determine whether a line is a legal end of RFC 2822 headers. You may override this method if your application wants to bend the rules, e.g. to strip trailing whitespace, or to recognize MH template separators ('--------'). For convenience (e.g. for code reading from sockets) a line consisting of \r\n also matches. """ return line in _blanklines def iscomment(self, line): """Determine whether a line should be skipped entirely. You may override this method in order to use Message parsing on tagged data in RFC 2822-like formats that support embedded comments or free-text data. """ return False def getallmatchingheaders(self, name): """Find all header lines matching a given header name. Look through the list of headers and find all lines matching a given header name (and their continuation lines). A list of the lines is returned, without interpretation. If the header does not occur, an empty list is returned. If the header occurs multiple times, all occurrences are returned. Case is not important in the header name. """ name = name.lower() + ':' n = len(name) lst = [] hit = 0 for line in self.headers: if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: lst.append(line) return lst def getfirstmatchingheader(self, name): """Get the first header line matching name. This is similar to getallmatchingheaders, but it returns only the first matching header (and its continuation lines). """ name = name.lower() + ':' n = len(name) lst = [] hit = 0 for line in self.headers: if hit: if not line[:1].isspace(): break elif line[:n].lower() == name: hit = 1 if hit: lst.append(line) return lst def getrawheader(self, name): """A higher-level interface to getfirstmatchingheader(). Return a string containing the literal text of the header but with the keyword stripped. All leading, trailing and embedded whitespace is kept in the string, however. Return None if the header does not occur. """ lst = self.getfirstmatchingheader(name) if not lst: return None lst[0] = lst[0][len(name) + 1:] return ''.join(lst) def getheader(self, name, default=None): """Get the header value for a name. This is the normal interface: it returns a stripped version of the header value for a given header name, or None if it doesn't exist. This uses the dictionary version which finds the *last* such header. """ return self.dict.get(name.lower(), default) get = getheader def getheaders(self, name): """Get all values for a header. This returns a list of values for headers given more than once; each value in the result list is stripped in the same way as the result of getheader(). If the header is not given, return an empty list. """ result = [] current = '' have_header = 0 for s in self.getallmatchingheaders(name): if s[0].isspace(): if current: current = "%s\n %s" % (current, s.strip()) else: current = s.strip() else: if have_header: result.append(current) current = s[s.find(":") + 1:].strip() have_header = 1 if have_header: result.append(current) return result def getaddr(self, name): """Get a single address from a header, as a tuple. An example return value: ('Guido van Rossum', 'guido@cwi.nl') """ # New, by Ben Escoto alist = self.getaddrlist(name) if alist: return alist[0] else: return (None, None) def getaddrlist(self, name): """Get a list of addresses from a header. Retrieves a list of addresses from a header, where each address is a tuple as returned by getaddr(). Scans all named headers, so it works properly with multiple To: or Cc: headers for example. """ raw = [] for h in self.getallmatchingheaders(name): if h[0] in ' \t': raw.append(h) else: if raw: raw.append(', ') i = h.find(':') if i > 0: addr = h[i+1:] raw.append(addr) alladdrs = ''.join(raw) a = AddressList(alladdrs) return a.addresslist def getdate(self, name): """Retrieve a date field from a header. Retrieves a date field from the named header, returning a tuple compatible with time.mktime(). """ try: data = self[name] except KeyError: return None return parsedate(data) def getdate_tz(self, name): """Retrieve a date field from a header as a 10-tuple. The first 9 elements make up a tuple compatible with time.mktime(), and the 10th is the offset of the poster's time zone from GMT/UTC. """ try: data = self[name] except KeyError: return None return parsedate_tz(data) # Access as a dictionary (only finds *last* header of each type): def __len__(self): """Get the number of headers in a message.""" return len(self.dict) def __getitem__(self, name): """Get a specific header, as from a dictionary.""" return self.dict[name.lower()] def __setitem__(self, name, value): """Set the value of a header. Note: This is not a perfect inversion of __getitem__, because any changed headers get stuck at the end of the raw-headers list rather than where the altered header was. """ del self[name] # Won't fail if it doesn't exist self.dict[name.lower()] = value text = name + ": " + value for line in text.split("\n"): self.headers.append(line + "\n") def __delitem__(self, name): """Delete all occurrences of a specific header, if it is present.""" name = name.lower() if not name in self.dict: return del self.dict[name] name = name + ':' n = len(name) lst = [] hit = 0 for i in range(len(self.headers)): line = self.headers[i] if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: lst.append(i) for i in reversed(lst): del self.headers[i] def setdefault(self, name, default=""): lowername = name.lower() if lowername in self.dict: return self.dict[lowername] else: text = name + ": " + default for line in text.split("\n"): self.headers.append(line + "\n") self.dict[lowername] = default return default def has_key(self, name): """Determine whether a message contains the named header.""" return name.lower() in self.dict def __contains__(self, name): """Determine whether a message contains the named header.""" return name.lower() in self.dict def __iter__(self): return iter(self.dict) def keys(self): """Get all of a message's header field names.""" return self.dict.keys() def values(self): """Get all of a message's header field values.""" return self.dict.values() def items(self): """Get all of a message's headers. Returns a list of name, value tuples. """ return self.dict.items() def __str__(self): return ''.join(self.headers) # Utility functions # ----------------- # XXX Should fix unquote() and quote() to be really conformant. # XXX The inverses of the parse functions may also be useful. def unquote(s): """Remove quotes from a string.""" if len(s) > 1: if s.startswith('"') and s.endswith('"'): return s[1:-1].replace('\\\\', '\\').replace('\\"', '"') if s.startswith('<') and s.endswith('>'): return s[1:-1] return s def quote(s): """Add quotes around a string.""" return s.replace('\\', '\\\\').replace('"', '\\"') def parseaddr(address): """Parse an address into a (realname, mailaddr) tuple.""" a = AddressList(address) lst = a.addresslist if not lst: return (None, None) return lst[0] class AddrlistClass: """Address parser class by Ben Escoto. To understand what this class does, it helps to have a copy of RFC 2822 in front of you. http://www.faqs.org/rfcs/rfc2822.html Note: this class interface is deprecated and may be removed in the future. Use rfc822.AddressList instead. """ def __init__(self, field): """Initialize a new instance. `field' is an unparsed address header field, containing one or more addresses. """ self.specials = '()<>@,:;.\"[]' self.pos = 0 self.LWS = ' \t' self.CR = '\r\n' self.atomends = self.specials + self.LWS + self.CR # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it # is obsolete syntax. RFC 2822 requires that we recognize obsolete # syntax, so allow dots in phrases. self.phraseends = self.atomends.replace('.', '') self.field = field self.commentlist = [] def gotonext(self): """Parse up to the start of the next address.""" while self.pos < len(self.field): if self.field[self.pos] in self.LWS + '\n\r': self.pos = self.pos + 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) else: break def getaddrlist(self): """Parse all addresses. Returns a list containing all of the addresses. """ result = [] ad = self.getaddress() while ad: result += ad ad = self.getaddress() return result def getaddress(self): """Parse the next address.""" self.commentlist = [] self.gotonext() oldpos = self.pos oldcl = self.commentlist plist = self.getphraselist() self.gotonext() returnlist = [] if self.pos >= len(self.field): # Bad email address technically, no domain. if plist: returnlist = [(' '.join(self.commentlist), plist[0])] elif self.field[self.pos] in '.@': # email address is just an addrspec # this isn't very efficient since we start over self.pos = oldpos self.commentlist = oldcl addrspec = self.getaddrspec() returnlist = [(' '.join(self.commentlist), addrspec)] elif self.field[self.pos] == ':': # address is a group returnlist = [] fieldlen = len(self.field) self.pos += 1 while self.pos < len(self.field): self.gotonext() if self.pos < fieldlen and self.field[self.pos] == ';': self.pos += 1 break returnlist = returnlist + self.getaddress() elif self.field[self.pos] == '<': # Address is a phrase then a route addr routeaddr = self.getrouteaddr() if self.commentlist: returnlist = [(' '.join(plist) + ' (' + \ ' '.join(self.commentlist) + ')', routeaddr)] else: returnlist = [(' '.join(plist), routeaddr)] else: if plist: returnlist = [(' '.join(self.commentlist), plist[0])] elif self.field[self.pos] in self.specials: self.pos += 1 self.gotonext() if self.pos < len(self.field) and self.field[self.pos] == ',': self.pos += 1 return returnlist def getrouteaddr(self): """Parse a route address (Return-path value). This method just skips all the route stuff and returns the addrspec. """ if self.field[self.pos] != '<': return expectroute = 0 self.pos += 1 self.gotonext() adlist = "" while self.pos < len(self.field): if expectroute: self.getdomain() expectroute = 0 elif self.field[self.pos] == '>': self.pos += 1 break elif self.field[self.pos] == '@': self.pos += 1 expectroute = 1 elif self.field[self.pos] == ':': self.pos += 1 else: adlist = self.getaddrspec() self.pos += 1 break self.gotonext() return adlist def getaddrspec(self): """Parse an RFC 2822 addr-spec.""" aslist = [] self.gotonext() while self.pos < len(self.field): if self.field[self.pos] == '.': aslist.append('.') self.pos += 1 elif self.field[self.pos] == '"': aslist.append('"%s"' % self.getquote()) elif self.field[self.pos] in self.atomends: break else: aslist.append(self.getatom()) self.gotonext() if self.pos >= len(self.field) or self.field[self.pos] != '@': return ''.join(aslist) aslist.append('@') self.pos += 1 self.gotonext() return ''.join(aslist) + self.getdomain() def getdomain(self): """Get the complete domain name from an address.""" sdlist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS: self.pos += 1 elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] == '[': sdlist.append(self.getdomainliteral()) elif self.field[self.pos] == '.': self.pos += 1 sdlist.append('.') elif self.field[self.pos] in self.atomends: break else: sdlist.append(self.getatom()) return ''.join(sdlist) def getdelimited(self, beginchar, endchars, allowcomments = 1): """Parse a header fragment delimited by special characters. `beginchar' is the start character for the fragment. If self is not looking at an instance of `beginchar' then getdelimited returns the empty string. `endchars' is a sequence of allowable end-delimiting characters. Parsing stops when one of these is encountered. If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed within the parsed fragment. """ if self.field[self.pos] != beginchar: return '' slist = [''] quote = 0 self.pos += 1 while self.pos < len(self.field): if quote == 1: slist.append(self.field[self.pos]) quote = 0 elif self.field[self.pos] in endchars: self.pos += 1 break elif allowcomments and self.field[self.pos] == '(': slist.append(self.getcomment()) continue # have already advanced pos from getcomment elif self.field[self.pos] == '\\': quote = 1 else: slist.append(self.field[self.pos]) self.pos += 1 return ''.join(slist) def getquote(self): """Get a quote-delimited fragment from self's field.""" return self.getdelimited('"', '"\r', 0) def getcomment(self): """Get a parenthesis-delimited fragment from self's field.""" return self.getdelimited('(', ')\r', 1) def getdomainliteral(self): """Parse an RFC 2822 domain-literal.""" return '[%s]' % self.getdelimited('[', ']\r', 0) def getatom(self, atomends=None): """Parse an RFC 2822 atom. Optional atomends specifies a different set of end token delimiters (the default is to use self.atomends). This is used e.g. in getphraselist() since phrase endings must not include the `.' (which is legal in phrases).""" atomlist = [''] if atomends is None: atomends = self.atomends while self.pos < len(self.field): if self.field[self.pos] in atomends: break else: atomlist.append(self.field[self.pos]) self.pos += 1 return ''.join(atomlist) def getphraselist(self): """Parse a sequence of RFC 2822 phrases. A phrase is a sequence of words, which are in turn either RFC 2822 atoms or quoted-strings. Phrases are canonicalized by squeezing all runs of continuous whitespace into one space. """ plist = [] while self.pos < len(self.field): if self.field[self.pos] in self.LWS: self.pos += 1 elif self.field[self.pos] == '"': plist.append(self.getquote()) elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] in self.phraseends: break else: plist.append(self.getatom(self.phraseends)) return plist class AddressList(AddrlistClass): """An AddressList encapsulates a list of parsed RFC 2822 addresses.""" def __init__(self, field): AddrlistClass.__init__(self, field) if field: self.addresslist = self.getaddrlist() else: self.addresslist = [] def __len__(self): return len(self.addresslist) def __str__(self): return ", ".join(map(dump_address_pair, self.addresslist)) def __add__(self, other): # Set union newaddr = AddressList(None) newaddr.addresslist = self.addresslist[:] for x in other.addresslist: if not x in self.addresslist: newaddr.addresslist.append(x) return newaddr def __iadd__(self, other): # Set union, in-place for x in other.addresslist: if not x in self.addresslist: self.addresslist.append(x) return self def __sub__(self, other): # Set difference newaddr = AddressList(None) for x in self.addresslist: if not x in other.addresslist: newaddr.addresslist.append(x) return newaddr def __isub__(self, other): # Set difference, in-place for x in other.addresslist: if x in self.addresslist: self.addresslist.remove(x) return self def __getitem__(self, index): # Make indexing, slices, and 'in' work return self.addresslist[index] def dump_address_pair(pair): """Dump a (name, address) pair in a canonicalized form.""" if pair[0]: return '"' + pair[0] + '" <' + pair[1] + '>' else: return pair[1] # Parse a date field _monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec', 'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december'] _daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] # The timezone table does not include the military time zones defined # in RFC822, other than Z. According to RFC1123, the description in # RFC822 gets the signs wrong, so we can't rely on any such time # zones. RFC1123 recommends that numeric timezone indicators be used # instead of timezone names. _timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0, 'AST': -400, 'ADT': -300, # Atlantic (used in Canada) 'EST': -500, 'EDT': -400, # Eastern 'CST': -600, 'CDT': -500, # Central 'MST': -700, 'MDT': -600, # Mountain 'PST': -800, 'PDT': -700 # Pacific } def parsedate_tz(data): """Convert a date string to a time tuple. Accounts for military timezones. """ if not data: return None data = data.split() if data[0][-1] in (',', '.') or data[0].lower() in _daynames: # There's a dayname here. Skip it del data[0] else: # no space after the "weekday,"? i = data[0].rfind(',') if i >= 0: data[0] = data[0][i+1:] if len(data) == 3: # RFC 850 date, deprecated stuff = data[0].split('-') if len(stuff) == 3: data = stuff + data[1:] if len(data) == 4: s = data[3] i = s.find('+') if i > 0: data[3:] = [s[:i], s[i+1:]] else: data.append('') # Dummy tz if len(data) < 5: return None data = data[:5] [dd, mm, yy, tm, tz] = data mm = mm.lower() if not mm in _monthnames: dd, mm = mm, dd.lower() if not mm in _monthnames: return None mm = _monthnames.index(mm)+1 if mm > 12: mm = mm - 12 if dd[-1] == ',': dd = dd[:-1] i = yy.find(':') if i > 0: yy, tm = tm, yy if yy[-1] == ',': yy = yy[:-1] if not yy[0].isdigit(): yy, tz = tz, yy if tm[-1] == ',': tm = tm[:-1] tm = tm.split(':') if len(tm) == 2: [thh, tmm] = tm tss = '0' elif len(tm) == 3: [thh, tmm, tss] = tm else: return None try: yy = int(yy) dd = int(dd) thh = int(thh) tmm = int(tmm) tss = int(tss) except ValueError: return None tzoffset = None tz = tz.upper() if tz in _timezones: tzoffset = _timezones[tz] else: try: tzoffset = int(tz) except ValueError: pass # Convert a timezone offset into seconds ; -0500 -> -18000 if tzoffset: if tzoffset < 0: tzsign = -1 tzoffset = -tzoffset else: tzsign = 1 tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60) return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset) def parsedate(data): """Convert a time string to a time tuple.""" t = parsedate_tz(data) if t is None: return t return t[:9] def mktime_tz(data): """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp.""" if data[9] is None: # No zone info, so localtime is better assumption than GMT return time.mktime(data[:8] + (-1,)) else: t = time.mktime(data[:8] + (0,)) return t - data[9] - time.timezone def formatdate(timeval=None): """Returns time format preferred for Internet standards. Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 According to RFC 1123, day and month names must always be in English. If not for that, this code could use strftime(). It can't because strftime() honors the locale and could generated non-English names. """ if timeval is None: timeval = time.time() timeval = time.gmtime(timeval) return "%s, %02d %s %04d %02d:%02d:%02d GMT" % ( ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[timeval[6]], timeval[2], ("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")[timeval[1]-1], timeval[0], timeval[3], timeval[4], timeval[5]) # When used as script, run a small test program. # The first command line argument must be a filename containing one # message in RFC-822 format. if __name__ == '__main__': import sys, os file = os.path.join(os.environ['HOME'], 'Mail/inbox/1') if sys.argv[1:]: file = sys.argv[1] f = open(file, 'r') m = Message(f) print 'From:', m.getaddr('from') print 'To:', m.getaddrlist('to') print 'Subject:', m.getheader('subject') print 'Date:', m.getheader('date') date = m.getdate_tz('date') tz = date[-1] date = time.localtime(mktime_tz(date)) if date: print 'ParsedDate:', time.asctime(date), hhmmss = tz hhmm, ss = divmod(hhmmss, 60) hh, mm = divmod(hhmm, 60) print "%+03d%02d" % (hh, mm), if ss: print ".%02d" % ss, print else: print 'ParsedDate:', None m.rewindbody() n = 0 while f.readline(): n += 1 print 'Lines:', n print '-'*70 print 'len =', len(m) if 'Date' in m: print 'Date =', m['Date'] if 'X-Nonsense' in m: pass print 'keys =', m.keys() print 'values =', m.values() print 'items =', m.items()
shitolepriya/Saloon_erp
refs/heads/master
erpnext/accounts/report/accounts_receivable/accounts_receivable.py
11
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe import _, scrub from frappe.utils import getdate, nowdate, flt, cint class ReceivablePayableReport(object): def __init__(self, filters=None): self.filters = frappe._dict(filters or {}) self.filters.report_date = getdate(self.filters.report_date or nowdate()) self.age_as_on = getdate(nowdate()) \ if self.filters.report_date > getdate(nowdate()) \ else self.filters.report_date def run(self, args): party_naming_by = frappe.db.get_value(args.get("naming_by")[0], None, args.get("naming_by")[1]) return self.get_columns(party_naming_by, args), self.get_data(party_naming_by, args) def get_columns(self, party_naming_by, args): columns = [_("Posting Date") + ":Date:80", _(args.get("party_type")) + ":Link/" + args.get("party_type") + ":200"] if party_naming_by == "Naming Series": columns += [args.get("party_type") + " Name::110"] columns += [_("Voucher Type") + "::110", _("Voucher No") + ":Dynamic Link/Voucher Type:120", _("Due Date") + ":Date:80"] if args.get("party_type") == "Supplier": columns += [_("Bill No") + "::80", _("Bill Date") + ":Date:80"] for label in ("Invoiced Amount", "Paid Amount", "Outstanding Amount"): columns.append({ "label": label, "fieldtype": "Currency", "options": "currency", "width": 120 }) columns += [_("Age (Days)") + ":Int:80"] if not "range1" in self.filters: self.filters["range1"] = "30" if not "range2" in self.filters: self.filters["range2"] = "60" if not "range3" in self.filters: self.filters["range3"] = "90" for label in ("0-{range1}".format(**self.filters), "{range1}-{range2}".format(**self.filters), "{range2}-{range3}".format(**self.filters), "{range3}-{above}".format(range3=self.filters.range3, above=_("Above"))): columns.append({ "label": label, "fieldtype": "Currency", "options": "currency", "width": 120 }) if args.get("party_type") == "Customer": columns += [_("Territory") + ":Link/Territory:80"] if args.get("party_type") == "Supplier": columns += [_("Supplier Type") + ":Link/Supplier Type:80"] columns += [ { "fieldname": "currency", "label": _("Currency"), "fieldtype": "Data", "width": 100, }, _("Remarks") + "::200" ] return columns def get_data(self, party_naming_by, args): from erpnext.accounts.utils import get_currency_precision currency_precision = get_currency_precision() or 2 dr_or_cr = "debit" if args.get("party_type") == "Customer" else "credit" voucher_details = self.get_voucher_details(args.get("party_type")) future_vouchers = self.get_entries_after(self.filters.report_date, args.get("party_type")) company_currency = frappe.db.get_value("Company", self.filters.get("company"), "default_currency") data = [] for gle in self.get_entries_till(self.filters.report_date, args.get("party_type")): if self.is_receivable_or_payable(gle, dr_or_cr, future_vouchers): outstanding_amount = self.get_outstanding_amount(gle, self.filters.report_date, dr_or_cr) if abs(outstanding_amount) > 0.1/10**currency_precision: row = [gle.posting_date, gle.party] # customer / supplier name if party_naming_by == "Naming Series": row += [self.get_party_name(gle.party_type, gle.party)] # get due date due_date = voucher_details.get(gle.voucher_no, {}).get("due_date", "") row += [gle.voucher_type, gle.voucher_no, due_date] # get supplier bill details if args.get("party_type") == "Supplier": row += [ voucher_details.get(gle.voucher_no, {}).get("bill_no", ""), voucher_details.get(gle.voucher_no, {}).get("bill_date", "") ] # invoiced and paid amounts invoiced_amount = gle.get(dr_or_cr) if (gle.get(dr_or_cr) > 0) else 0 paid_amt = invoiced_amount - outstanding_amount row += [invoiced_amount, paid_amt, outstanding_amount] # ageing data entry_date = due_date if self.filters.ageing_based_on == "Due Date" else gle.posting_date row += get_ageing_data(cint(self.filters.range1), cint(self.filters.range2), cint(self.filters.range3), self.age_as_on, entry_date, outstanding_amount) # customer territory / supplier type if args.get("party_type") == "Customer": row += [self.get_territory(gle.party)] if args.get("party_type") == "Supplier": row += [self.get_supplier_type(gle.party)] if self.filters.get(scrub(args.get("party_type"))): row.append(gle.account_currency) else: row.append(company_currency) row.append(gle.remarks) data.append(row) return data def get_entries_after(self, report_date, party_type): # returns a distinct list return list(set([(e.voucher_type, e.voucher_no) for e in self.get_gl_entries(party_type) if getdate(e.posting_date) > report_date])) def get_entries_till(self, report_date, party_type): # returns a generator return (e for e in self.get_gl_entries(party_type) if getdate(e.posting_date) <= report_date) def is_receivable_or_payable(self, gle, dr_or_cr, future_vouchers): return ( # advance (not gle.against_voucher) or # against sales order/purchase order (gle.against_voucher_type in ["Sales Order", "Purchase Order"]) or # sales invoice/purchase invoice (gle.against_voucher==gle.voucher_no and gle.get(dr_or_cr) > 0) or # entries adjusted with future vouchers ((gle.against_voucher_type, gle.against_voucher) in future_vouchers) ) def get_outstanding_amount(self, gle, report_date, dr_or_cr): payment_amount = 0.0 for e in self.get_gl_entries_for(gle.party, gle.party_type, gle.voucher_type, gle.voucher_no): if getdate(e.posting_date) <= report_date and e.name!=gle.name: payment_amount += (flt(e.credit if gle.party_type == "Customer" else e.debit) - flt(e.get(dr_or_cr))) return flt(gle.get(dr_or_cr)) - flt(gle.credit if gle.party_type == "Customer" else gle.debit) - payment_amount def get_party_name(self, party_type, party_name): return self.get_party_map(party_type).get(party_name, {}).get("customer_name" if party_type == "Customer" else "supplier_name") or "" def get_territory(self, party_name): return self.get_party_map("Customer").get(party_name, {}).get("territory") or "" def get_supplier_type(self, party_name): return self.get_party_map("Supplier").get(party_name, {}).get("supplier_type") or "" def get_party_map(self, party_type): if not hasattr(self, "party_map"): if party_type == "Customer": self.party_map = dict(((r.name, r) for r in frappe.db.sql("""select {0}, {1}, {2} from `tab{3}`""" .format("name", "customer_name", "territory", party_type), as_dict=True))) elif party_type == "Supplier": self.party_map = dict(((r.name, r) for r in frappe.db.sql("""select {0}, {1}, {2} from `tab{3}`""" .format("name", "supplier_name", "supplier_type", party_type), as_dict=True))) return self.party_map def get_voucher_details(self, party_type): voucher_details = frappe._dict() if party_type == "Customer": for si in frappe.db.sql("""select name, due_date from `tabSales Invoice` where docstatus=1""", as_dict=1): voucher_details.setdefault(si.name, si) if party_type == "Supplier": for pi in frappe.db.sql("""select name, due_date, bill_no, bill_date from `tabPurchase Invoice` where docstatus=1""", as_dict=1): voucher_details.setdefault(pi.name, pi) return voucher_details def get_gl_entries(self, party_type): if not hasattr(self, "gl_entries"): conditions, values = self.prepare_conditions(party_type) if self.filters.get(scrub(party_type)): select_fields = "debit_in_account_currency as debit, credit_in_account_currency as credit" else: select_fields = "debit, credit" self.gl_entries = frappe.db.sql("""select name, posting_date, account, party_type, party, voucher_type, voucher_no, against_voucher_type, against_voucher, account_currency, remarks, {0} from `tabGL Entry` where docstatus < 2 and party_type=%s and ifnull(party, '') != '' {1} order by posting_date, party""" .format(select_fields, conditions), values, as_dict=True) return self.gl_entries def prepare_conditions(self, party_type): conditions = [""] values = [party_type] party_type_field = scrub(party_type) if self.filters.company: conditions.append("company=%s") values.append(self.filters.company) if self.filters.get(party_type_field): conditions.append("party=%s") values.append(self.filters.get(party_type_field)) return " and ".join(conditions), values def get_gl_entries_for(self, party, party_type, against_voucher_type, against_voucher): if not hasattr(self, "gl_entries_map"): self.gl_entries_map = {} for gle in self.get_gl_entries(party_type): if gle.against_voucher_type and gle.against_voucher: self.gl_entries_map.setdefault(gle.party, {})\ .setdefault(gle.against_voucher_type, {})\ .setdefault(gle.against_voucher, [])\ .append(gle) return self.gl_entries_map.get(party, {})\ .get(against_voucher_type, {})\ .get(against_voucher, []) def execute(filters=None): args = { "party_type": "Customer", "naming_by": ["Selling Settings", "cust_master_name"], } return ReceivablePayableReport(filters).run(args) def get_ageing_data(first_range, second_range, third_range, age_as_on, entry_date, outstanding_amount): # [0-30, 30-60, 60-90, 90-above] outstanding_range = [0.0, 0.0, 0.0, 0.0] if not (age_as_on and entry_date): return [0] + outstanding_range age = (getdate(age_as_on) - getdate(entry_date)).days or 0 index = None for i, days in enumerate([first_range, second_range, third_range]): if age <= days: index = i break if index is None: index = 3 outstanding_range[index] = outstanding_amount return [age] + outstanding_range
CentricWebEstate/bottle
refs/heads/master
test/test_config.py
13
import unittest from bottle import ConfigDict class TestConfDict(unittest.TestCase): def test_isadict(self): """ ConfigDict should behaves like a normal dict. """ # It is a dict-subclass, so this kind of pointless, but it doen't hurt. d, m = dict(), ConfigDict() d['key'], m['key'] = 'value', 'value' d['k2'], m['k2'] = 'v1', 'v1' d['k2'], m['k2'] = 'v2', 'v2' self.assertEqual(d.keys(), m.keys()) self.assertEqual(list(d.values()), list(m.values())) self.assertEqual(d.get('key'), m.get('key')) self.assertEqual(d.get('cay'), m.get('cay')) self.assertEqual(list(iter(d)), list(iter(m))) self.assertEqual([k for k in d], [k for k in m]) self.assertEqual(len(d), len(m)) self.assertEqual('key' in d, 'key' in m) self.assertEqual('cay' in d, 'cay' in m) self.assertRaises(KeyError, lambda: m['cay']) def test_write(self): c = ConfigDict() c['key'] = 'value' self.assertEqual(c['key'], 'value') self.assertTrue('key' in c) c['key'] = 'value2' self.assertEqual(c['key'], 'value2') def test_update(self): c = ConfigDict() c['key'] = 'value' c.update(key='value2', key2='value3') self.assertEqual(c['key'], 'value2') self.assertEqual(c['key2'], 'value3') def test_namespaces(self): c = ConfigDict() c.update('a.b', key='value') self.assertEqual(c['a.b.key'], 'value') def test_meta(self): c = ConfigDict() c.meta_set('bool', 'filter', bool) c.meta_set('int', 'filter', int) c['bool'] = 'I am so true!' c['int'] = '6' self.assertTrue(c['bool'] is True) self.assertEqual(c['int'], 6) self.assertRaises(ValueError, lambda: c.update(int='not an int')) def test_load_dict(self): c = ConfigDict() d = dict(a=dict(b=dict(foo=5, bar=6), baz=7)) c.load_dict(d) self.assertEqual(c['a.b.foo'], 5) self.assertEqual(c['a.b.bar'], 6) self.assertEqual(c['a.baz'], 7) # unicode keys (see issue #720) try: key = unichr(12354) except NameError: key = chr(12354) c = ConfigDict() c.load_dict({key: 'value'}) self.assertEqual('value', c[key]) c = ConfigDict() c.load_dict({key: {'subkey': 'value'}}) self.assertEqual('value', c[key + '.subkey']) if __name__ == '__main__': #pragma: no cover unittest.main()
psyonara/listattack
refs/heads/master
lists/forms.py
1
from django import forms class NewListForm(forms.Form): name = forms.CharField(max_length=100) is_private = forms.BooleanField() is_checklist = forms.BooleanField()
Jusedawg/SickRage
refs/heads/develop
lib/mako/compat.py
40
import sys import time py3k = sys.version_info >= (3, 0) py33 = sys.version_info >= (3, 3) py2k = sys.version_info < (3,) py26 = sys.version_info >= (2, 6) jython = sys.platform.startswith('java') win32 = sys.platform.startswith('win') pypy = hasattr(sys, 'pypy_version_info') if py3k: # create a "getargspec" from getfullargspec(), which is not deprecated # in Py3K; getargspec() has started to emit warnings as of Py3.5. # As of Py3.4, now they are trying to move from getfullargspec() # to "signature()", but getfullargspec() is not deprecated, so stick # with that for now. import collections ArgSpec = collections.namedtuple( "ArgSpec", ["args", "varargs", "keywords", "defaults"]) from inspect import getfullargspec as inspect_getfullargspec def inspect_getargspec(func): return ArgSpec( *inspect_getfullargspec(func)[0:4] ) else: from inspect import getargspec as inspect_getargspec # noqa if py3k: from io import StringIO import builtins as compat_builtins from urllib.parse import quote_plus, unquote_plus from html.entities import codepoint2name, name2codepoint string_types = str, binary_type = bytes text_type = str from io import BytesIO as byte_buffer def u(s): return s def b(s): return s.encode("latin-1") def octal(lit): return eval("0o" + lit) else: import __builtin__ as compat_builtins # noqa try: from cStringIO import StringIO except: from StringIO import StringIO byte_buffer = StringIO from urllib import quote_plus, unquote_plus # noqa from htmlentitydefs import codepoint2name, name2codepoint # noqa string_types = basestring, # noqa binary_type = str text_type = unicode # noqa def u(s): return unicode(s, "utf-8") # noqa def b(s): return s def octal(lit): return eval("0" + lit) if py33: from importlib import machinery def load_module(module_id, path): return machinery.SourceFileLoader(module_id, path).load_module() else: import imp def load_module(module_id, path): fp = open(path, 'rb') try: return imp.load_source(module_id, path, fp) finally: fp.close() if py3k: def reraise(tp, value, tb=None, cause=None): if cause is not None: value.__cause__ = cause if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: exec("def reraise(tp, value, tb=None, cause=None):\n" " raise tp, value, tb\n") def exception_as(): return sys.exc_info()[1] try: import threading if py3k: import _thread as thread else: import thread except ImportError: import dummy_threading as threading # noqa if py3k: import _dummy_thread as thread else: import dummy_thread as thread # noqa if win32 or jython: time_func = time.clock else: time_func = time.time try: from functools import partial except: def partial(func, *args, **keywords): def newfunc(*fargs, **fkeywords): newkeywords = keywords.copy() newkeywords.update(fkeywords) return func(*(args + fargs), **newkeywords) return newfunc all = all import json # noqa def exception_name(exc): return exc.__class__.__name__ try: from inspect import CO_VARKEYWORDS, CO_VARARGS def inspect_func_args(fn): if py3k: co = fn.__code__ else: co = fn.func_code nargs = co.co_argcount names = co.co_varnames args = list(names[:nargs]) varargs = None if co.co_flags & CO_VARARGS: varargs = co.co_varnames[nargs] nargs = nargs + 1 varkw = None if co.co_flags & CO_VARKEYWORDS: varkw = co.co_varnames[nargs] if py3k: return args, varargs, varkw, fn.__defaults__ else: return args, varargs, varkw, fn.func_defaults except ImportError: import inspect def inspect_func_args(fn): return inspect.getargspec(fn) if py3k: def callable(fn): return hasattr(fn, '__call__') else: callable = callable ################################################ # cross-compatible metaclass implementation # Copyright (c) 2010-2012 Benjamin Peterson def with_metaclass(meta, base=object): """Create a base class with a metaclass.""" return meta("%sBase" % meta.__name__, (base,), {}) ################################################ def arg_stringname(func_arg): """Gets the string name of a kwarg or vararg In Python3.4 a function's args are of _ast.arg type not _ast.name """ if hasattr(func_arg, 'arg'): return func_arg.arg else: return str(func_arg)
kytvi2p/Sigil
refs/heads/master
src/Resource_Files/python3lib/bs4repair.py
2
#!/usr/bin/env python3 import sys # until we get a properly embedded python 3 with its own site-packages # force our current module path to come before site_packages # to prevent name collisions with our versions and any site-packages def insert_into_syspath(): n = 0 sp = None ourhome = sys.path[-1] for apath in sys.path: if apath.endswith("site-packages"): sp = n break n += 1 if sp is not None: sys.path.insert(sp,ourhome) insert_into_syspath() from bs4 import BeautifulSoup from bs4.builder._lxml import LXMLTreeBuilderForXML import re ebook_xml_empty_tags = ["meta", "item", "itemref", "reference", "content"] def remove_xml_header(data): return re.sub(r'<\s*\?xml\s*[^>]*\?>\s*','',data, flags=re.I) # borrowed from Kovid's calibre to work around # <title/> parsing idiocy in html5lib # see: http://code.google.com/p/html5lib/issues/detail?id=195 def fix_self_closing_cdata_tags(data): return re.sub(r'<\s*(%s)\s*[^>]*/\s*>' % ('|'.join(cdataElements|rcdataElements)), r'<\1></\1>', data, flags=re.I) # BS4 with lxml for xml strips whitespace so always will want to prettyprint xml # def repairXML(data, self_closing_tags=ebook_xml_empty_tags): # xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=self_closing_tags) # soup = BeautifulSoup(data, features=None, builder=xmlbuilder) # newdata = soup.serialize() # return newdata def repairPrettyPrintXML(data, self_closing_tags=ebook_xml_empty_tags, indent_chars=" "): xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=self_closing_tags) soup = BeautifulSoup(data, features=None, builder=xmlbuilder) newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=indent_chars) return newdata def main(): opfxml = ''' <?xml version="1.0" encoding="utf-8" standalone="yes"?> <package xmlns="http://www.idpf.org/2007/opf" unique-identifier="BookId" version="2.0"> <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf"> <dc:identifier id="BookId" opf:scheme="UUID">urn:uuid:a418a8f1-dcbc-4c5d-a18f-533765e34ee8</dc:identifier> </metadata> <manifest> <item href="toc.ncx" id="ncx" media-type="application/x-dtbncx+xml" /> <item href="Text/Section0001.xhtml" id="Section0001.xhtml" media-type="application/xhtml+xml" /> </manifest> <spine toc="ncx"> <itemref idref="Section0001.xhtml" > </spine> <guide /> </package> ''' print(repairPrettyPrintXML(opfxml, indent_chars=" ")) return 0 if __name__ == '__main__': sys.exit(main())
houlixin/BBB-TISDK
refs/heads/master
linux-devkit/sysroots/i686-arago-linux/usr/lib/python2.7/distutils/debug.py
251
import os __revision__ = "$Id$" # If DISTUTILS_DEBUG is anything other than the empty string, we run in # debug mode. DEBUG = os.environ.get('DISTUTILS_DEBUG')
beatrizjesus/my-first-blog
refs/heads/master
pasta/Lib/site-packages/django/conf/locale/fr/formats.py
116
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F Y' TIME_FORMAT = 'H:i' DATETIME_FORMAT = 'j F Y H:i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'j N Y' SHORT_DATETIME_FORMAT = 'j N Y H:i' FIRST_DAY_OF_WEEK = 1 # Monday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06' '%d.%m.%Y', '%d.%m.%y', # Swiss (fr_CH), '25.10.2006', '25.10.06' # '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006' ) DATETIME_INPUT_FORMATS = ( '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d.%m.%Y %H:%M:%S', # Swiss (fr_CH), '25.10.2006 14:30:59' '%d.%m.%Y %H:%M:%S.%f', # Swiss (fr_CH), '25.10.2006 14:30:59.000200' '%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30' '%d.%m.%Y', # Swiss (fr_CH), '25.10.2006' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '\xa0' # non-breaking space NUMBER_GROUPING = 3
uwosh/Campus_Directory_web_service
refs/heads/master
getClassesWithInstructorsByTermCX.py
1
# getClassesWithInstructorsByTermCX - get info (including instructor) on classes offered in a semester; does not include classes without an assigned instructor import re import xmlrpclib import cx_Oracle Kim_Nguyen_G5 = '192.168.0.1' Kim_Nguyen_iMac = '192.168.0.1' Kim_Nguyen_MacBook = '192.168.0.1' Kim_Nguyen_MacBook_IDEA_Lab = '192.168.0.1' Plone1 = '192.168.0.1' Plone3 = '192.168.0.1' Joel_Herron_iMac = '192.168.0.1' ws_it_uwosh_edu = '192.168.0.1' Maccabee_Levine = '192.168.0.1' MIO_Helios_Server = '192.168.0.1' David_Hietpas = '192.168.0.1' def getClassesWithInstructorsByTermCX(self, strm): request = self.REQUEST RESPONSE = request.RESPONSE remote_addr = request.REMOTE_ADDR if remote_addr in [Kim_Nguyen_iMac, Joel_Herron_iMac, '127.0.0.1', Plone3, ws_it_uwosh_edu, Maccabee_Levine, David_Hietpas, MIO_Helios_Server, Kim_Nguyen_MacBook, Kim_Nguyen_MacBook_IDEA_Lab]: file = open('/opt/Plone-2.5.5/zeocluster/client2/Extensions/Oracle_Database_Connection_NGUYEN_PRD.txt', 'r') for line in file.readlines(): if line <> "" and not line.startswith('#'): connString = line file.close() connection = cx_Oracle.connect(connString) cursor = connection.cursor() cursor.execute(""" select c.subject, c.catalog_nbr, c.descr, c.class_section, c.crse_id, c.session_code, c.acad_group, c.class_nbr, n.first_name, n.last_name, e.email_addr, cc.course_title_long, nvl(t.descr, ''), im.instruction_mode, im.descr from ps_class_tbl c left join ps_crse_topics t on c.crse_id = t.crse_id and c.crs_topic_id = t.crs_topic_id, ps_names n, ps_class_instr i, ps_email_addresses e, ps_crse_catalog cc, ps_instruct_mode im where c.strm = :arg1 and c.institution = 'UWOSH' and c.crse_id = i.crse_id and i.strm = c.strm and i.class_section = c.class_section and n.emplid = i.emplid and n.eff_status = 'A' and n.name_type = 'PRI' and n.effdt = (select max(effdt) from ps_names n2 where n2.emplid = n.emplid and n2.eff_status = n.eff_status and n2.name_type = n.name_type) and e.emplid = i.emplid and e_addr_type = 'CAMP' and c.crse_id = cc.crse_id and cc.effdt = (select max(effdt) from ps_crse_catalog where crse_id = cc.crse_id and eff_status = 'A') and cc.eff_status = 'A' and c.institution = im.institution and c.instruction_mode = im.instruction_mode and im.eff_status = 'A' and im.effdt = (select max(im2.effdt) from ps_instruct_mode im2 where im2.instruction_mode = im.instruction_mode and eff_status = 'A') -- AND c.subject = 'ART' --order by c.subject, c.catalog_nbr, c.class_section """, arg1 = strm) retlist = [] for column_1, column_2, column_3, column_4, column_5, column_6, column_7, column_8, column_9, column_10, column_11, column_12, column_13, column_14, column_15 in cursor: retlist.append([column_1, column_2, column_3, column_4, column_5, column_6, column_7, column_8, column_9, column_10, column_11, column_12, column_13, column_14, column_15, ]) myMarshaller = xmlrpclib.Marshaller(allow_none=True) return myMarshaller.dumps(retlist)
mixman/djangodev
refs/heads/master
tests/regressiontests/generic_views/edit.py
1
from __future__ import absolute_import from django.core.exceptions import ImproperlyConfigured from django.core.urlresolvers import reverse from django import forms from django.test import TestCase from django.utils.unittest import expectedFailure from . import views from .models import Artist, Author class ModelFormMixinTests(TestCase): def test_get_form(self): form_class = views.AuthorGetQuerySetFormView().get_form_class() self.assertEqual(form_class._meta.model, Author) class CreateViewTests(TestCase): urls = 'regressiontests.generic_views.urls' def test_create(self): res = self.client.get('/edit/authors/create/') self.assertEqual(res.status_code, 200) self.assertTrue(isinstance(res.context['form'], forms.ModelForm)) self.assertFalse('object' in res.context) self.assertFalse('author' in res.context) self.assertTemplateUsed(res, 'generic_views/author_form.html') res = self.client.post('/edit/authors/create/', {'name': 'Randall Munroe', 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/list/authors/') self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>']) def test_create_invalid(self): res = self.client.post('/edit/authors/create/', {'name': 'A' * 101, 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, 'generic_views/author_form.html') self.assertEqual(len(res.context['form'].errors), 1) self.assertEqual(Author.objects.count(), 0) def test_create_with_object_url(self): res = self.client.post('/edit/artists/create/', {'name': 'Rene Magritte'}) self.assertEqual(res.status_code, 302) artist = Artist.objects.get(name='Rene Magritte') self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % artist.pk) self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>']) def test_create_with_redirect(self): res = self.client.post('/edit/authors/create/redirect/', {'name': 'Randall Munroe', 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/edit/authors/create/') self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>']) def test_create_with_interpolated_redirect(self): res = self.client.post('/edit/authors/create/interpolate_redirect/', {'name': 'Randall Munroe', 'slug': 'randall-munroe'}) self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>']) self.assertEqual(res.status_code, 302) pk = Author.objects.all()[0].pk self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk) def test_create_with_special_properties(self): res = self.client.get('/edit/authors/create/special/') self.assertEqual(res.status_code, 200) self.assertTrue(isinstance(res.context['form'], views.AuthorForm)) self.assertFalse('object' in res.context) self.assertFalse('author' in res.context) self.assertTemplateUsed(res, 'generic_views/form.html') res = self.client.post('/edit/authors/create/special/', {'name': 'Randall Munroe', 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 302) obj = Author.objects.get(slug='randall-munroe') self.assertRedirects(res, reverse('author_detail', kwargs={'pk': obj.pk})) self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>']) def test_create_without_redirect(self): try: res = self.client.post('/edit/authors/create/naive/', {'name': 'Randall Munroe', 'slug': 'randall-munroe'}) self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided') except ImproperlyConfigured: pass def test_create_restricted(self): res = self.client.post('/edit/authors/create/restricted/', {'name': 'Randall Munroe', 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/accounts/login/?next=/edit/authors/create/restricted/') class UpdateViewTests(TestCase): urls = 'regressiontests.generic_views.urls' def test_update_post(self): a = Author.objects.create( name='Randall Munroe', slug='randall-munroe', ) res = self.client.get('/edit/author/%d/update/' % a.pk) self.assertEqual(res.status_code, 200) self.assertTrue(isinstance(res.context['form'], forms.ModelForm)) self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk)) self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk)) self.assertTemplateUsed(res, 'generic_views/author_form.html') # Modification with both POST and PUT (browser compatible) res = self.client.post('/edit/author/%d/update/' % a.pk, {'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/list/authors/') self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>']) @expectedFailure def test_update_put(self): a = Author.objects.create( name='Randall Munroe', slug='randall-munroe', ) res = self.client.get('/edit/author/%d/update/' % a.pk) self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, 'generic_views/author_form.html') res = self.client.put('/edit/author/%d/update/' % a.pk, {'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/list/authors/') self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>']) def test_update_invalid(self): a = Author.objects.create( name='Randall Munroe', slug='randall-munroe', ) res = self.client.post('/edit/author/%d/update/' % a.pk, {'name': 'A' * 101, 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 200) self.assertTemplateUsed(res, 'generic_views/author_form.html') self.assertEqual(len(res.context['form'].errors), 1) self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>']) def test_update_with_object_url(self): a = Artist.objects.create(name='Rene Magritte') res = self.client.post('/edit/artists/%d/update/' % a.pk, {'name': 'Rene Magritte'}) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % a.pk) self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>']) def test_update_with_redirect(self): a = Author.objects.create( name='Randall Munroe', slug='randall-munroe', ) res = self.client.post('/edit/author/%d/update/redirect/' % a.pk, {'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/edit/authors/create/') self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>']) def test_update_with_interpolated_redirect(self): a = Author.objects.create( name='Randall Munroe', slug='randall-munroe', ) res = self.client.post('/edit/author/%d/update/interpolate_redirect/' % a.pk, {'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'}) self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>']) self.assertEqual(res.status_code, 302) pk = Author.objects.all()[0].pk self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk) def test_update_with_special_properties(self): a = Author.objects.create( name='Randall Munroe', slug='randall-munroe', ) res = self.client.get('/edit/author/%d/update/special/' % a.pk) self.assertEqual(res.status_code, 200) self.assertTrue(isinstance(res.context['form'], views.AuthorForm)) self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk)) self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk)) self.assertFalse('author' in res.context) self.assertTemplateUsed(res, 'generic_views/form.html') res = self.client.post('/edit/author/%d/update/special/' % a.pk, {'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/detail/author/%d/' % a.pk) self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>']) def test_update_without_redirect(self): try: a = Author.objects.create( name='Randall Munroe', slug='randall-munroe', ) res = self.client.post('/edit/author/%d/update/naive/' % a.pk, {'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'}) self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided') except ImproperlyConfigured: pass def test_update_get_object(self): a = Author.objects.create( pk=1, name='Randall Munroe', slug='randall-munroe', ) res = self.client.get('/edit/author/update/') self.assertEqual(res.status_code, 200) self.assertTrue(isinstance(res.context['form'], forms.ModelForm)) self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk)) self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk)) self.assertTemplateUsed(res, 'generic_views/author_form.html') # Modification with both POST and PUT (browser compatible) res = self.client.post('/edit/author/update/', {'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'}) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/list/authors/') self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>']) class DeleteViewTests(TestCase): urls = 'regressiontests.generic_views.urls' def test_delete_by_post(self): a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'}) res = self.client.get('/edit/author/%d/delete/' % a.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk)) self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk)) self.assertTemplateUsed(res, 'generic_views/author_confirm_delete.html') # Deletion with POST res = self.client.post('/edit/author/%d/delete/' % a.pk) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/list/authors/') self.assertQuerysetEqual(Author.objects.all(), []) def test_delete_by_delete(self): # Deletion with browser compatible DELETE method a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'}) res = self.client.delete('/edit/author/%d/delete/' % a.pk) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/list/authors/') self.assertQuerysetEqual(Author.objects.all(), []) def test_delete_with_redirect(self): a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'}) res = self.client.post('/edit/author/%d/delete/redirect/' % a.pk) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/edit/authors/create/') self.assertQuerysetEqual(Author.objects.all(), []) def test_delete_with_special_properties(self): a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'}) res = self.client.get('/edit/author/%d/delete/special/' % a.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk)) self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk)) self.assertFalse('author' in res.context) self.assertTemplateUsed(res, 'generic_views/confirm_delete.html') res = self.client.post('/edit/author/%d/delete/special/' % a.pk) self.assertEqual(res.status_code, 302) self.assertRedirects(res, 'http://testserver/list/authors/') self.assertQuerysetEqual(Author.objects.all(), []) def test_delete_without_redirect(self): try: a = Author.objects.create( name='Randall Munroe', slug='randall-munroe', ) res = self.client.post('/edit/author/%d/delete/naive/' % a.pk) self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided') except ImproperlyConfigured: pass
romain-li/edx-platform
refs/heads/master
common/test/acceptance/pages/lms/course_wiki.py
2
""" Wiki tab on courses """ from common.test.acceptance.pages.lms.course_page import CoursePage from common.test.acceptance.pages.studio.utils import type_in_codemirror class CourseWikiPage(CoursePage): """ Course wiki navigation and objects. """ url_path = "wiki" def is_browser_on_page(self): """ Browser is on the wiki page if the wiki breadcrumb is present """ return self.q(css='.breadcrumb').present def open_editor(self): """ Display the editor for a wiki article. """ edit_button = self.q(css='.fa-pencil') edit_button.click() def show_history(self): """ Show the change history for a wiki article. """ edit_button = self.q(css='.fa-clock-o') edit_button.click() def show_children(self): """ Show the children of a wiki article. """ children_link = self.q(css='.see-children>a') children_link.click() @property def article_name(self): """ Return the name of the article """ return str(self.q(css='.main-article .entry-title').text[0]) class CourseWikiSubviewPage(CoursePage): # pylint: disable=abstract-method """ Abstract base page for subviews within the wiki. """ def __init__(self, browser, course_id, course_info): """ Course ID is currently of the form "edx/999/2013_Spring" but this format could change. """ super(CourseWikiSubviewPage, self).__init__(browser, course_id) self.course_id = course_id self.course_info = course_info self.article_name = "{org}.{course_number}.{course_run}".format( org=self.course_info['org'], course_number=self.course_info['number'], course_run=self.course_info['run'] ) class CourseWikiEditPage(CourseWikiSubviewPage): """ Editor page """ @property def url_path(self): """ Construct a URL to the page within the course. """ return "/wiki/" + self.article_name + "/_edit" def is_browser_on_page(self): """ The wiki page editor """ return self.q(css='.CodeMirror-scroll').present def replace_wiki_content(self, content): """ Editor must be open already. This will replace any content in the editor with new content """ type_in_codemirror(self, 0, content) def save_wiki_content(self): """ When the editor is open, click save """ self.q(css='button[name="save"]').click() self.wait_for_element_presence('.alert-success', 'wait for the article to be saved') class CourseWikiHistoryPage(CourseWikiSubviewPage): """ Course wiki change history page. """ def is_browser_on_page(self): """ Return if the browser is on the history page. """ return self.q(css='section.history').present @property def url_path(self): """ Construct a URL to the page within the course. """ return "/wiki/" + self.article_name + "/_history" class CourseWikiChildrenPage(CourseWikiSubviewPage): """ Course wiki "All Children" page. """ def is_browser_on_page(self): """ Return if the browser is on the wiki children page (which contains a search widget). """ return self.q(css='.form-search').present @property def url_path(self): """ Construct a URL to the page within the course. """ return "/wiki/" + self.article_name + "/_dir"
Eagles2F/sync-engine
refs/heads/master
migrations/versions/172_update_easuid_schema_4.py
8
"""update_easuid_schema_4 Revision ID: d0427f9f3d1 Revises: 584356bf23a3 Create Date: 2015-05-19 21:35:03.342221 """ # revision identifiers, used by Alembic. revision = 'd0427f9f3d1' down_revision = '584356bf23a3' from alembic import op def upgrade(): from inbox.ignition import main_engine engine = main_engine(pool_size=1, max_overflow=0) if not engine.has_table('easuid'): return conn = op.get_bind() conn.execute('''ALTER TABLE easuid CHANGE COLUMN msg_uid msg_uid INT(11) DEFAULT NULL, CHANGE COLUMN fld_uid fld_uid INT(11) DEFAULT NULL, ADD UNIQUE INDEX easaccount_id_2 (easaccount_id, device_id, easfoldersyncstatus_id, server_id)''') def downgrade(): raise Exception()
onlyjus/pyqtgraph
refs/heads/develop
pyqtgraph/opengl/items/GLScatterPlotItem.py
39
from OpenGL.GL import * from OpenGL.arrays import vbo from .. GLGraphicsItem import GLGraphicsItem from .. import shaders from ... import QtGui import numpy as np __all__ = ['GLScatterPlotItem'] class GLScatterPlotItem(GLGraphicsItem): """Draws points at a list of 3D positions.""" def __init__(self, **kwds): GLGraphicsItem.__init__(self) glopts = kwds.pop('glOptions', 'additive') self.setGLOptions(glopts) self.pos = [] self.size = 10 self.color = [1.0,1.0,1.0,0.5] self.pxMode = True #self.vbo = {} ## VBO does not appear to improve performance very much. self.setData(**kwds) def setData(self, **kwds): """ Update the data displayed by this item. All arguments are optional; for example it is allowed to update spot positions while leaving colors unchanged, etc. ==================== ================================================== **Arguments:** pos (N,3) array of floats specifying point locations. color (N,4) array of floats (0.0-1.0) specifying spot colors OR a tuple of floats specifying a single color for all spots. size (N,) array of floats specifying spot sizes or a single value to apply to all spots. pxMode If True, spot sizes are expressed in pixels. Otherwise, they are expressed in item coordinates. ==================== ================================================== """ args = ['pos', 'color', 'size', 'pxMode'] for k in kwds.keys(): if k not in args: raise Exception('Invalid keyword argument: %s (allowed arguments are %s)' % (k, str(args))) args.remove('pxMode') for arg in args: if arg in kwds: setattr(self, arg, kwds[arg]) #self.vbo.pop(arg, None) self.pxMode = kwds.get('pxMode', self.pxMode) self.update() def initializeGL(self): ## Generate texture for rendering points w = 64 def fn(x,y): r = ((x-w/2.)**2 + (y-w/2.)**2) ** 0.5 return 255 * (w/2. - np.clip(r, w/2.-1.0, w/2.)) pData = np.empty((w, w, 4)) pData[:] = 255 pData[:,:,3] = np.fromfunction(fn, pData.shape[:2]) #print pData.shape, pData.min(), pData.max() pData = pData.astype(np.ubyte) if getattr(self, "pointTexture", None) is None: self.pointTexture = glGenTextures(1) glActiveTexture(GL_TEXTURE0) glEnable(GL_TEXTURE_2D) glBindTexture(GL_TEXTURE_2D, self.pointTexture) glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, pData.shape[0], pData.shape[1], 0, GL_RGBA, GL_UNSIGNED_BYTE, pData) self.shader = shaders.getShaderProgram('pointSprite') #def getVBO(self, name): #if name not in self.vbo: #self.vbo[name] = vbo.VBO(getattr(self, name).astype('f')) #return self.vbo[name] #def setupGLState(self): #"""Prepare OpenGL state for drawing. This function is called immediately before painting.""" ##glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) ## requires z-sorting to render properly. #glBlendFunc(GL_SRC_ALPHA, GL_ONE) #glEnable( GL_BLEND ) #glEnable( GL_ALPHA_TEST ) #glDisable( GL_DEPTH_TEST ) ##glEnable( GL_POINT_SMOOTH ) ##glHint(GL_POINT_SMOOTH_HINT, GL_NICEST) ##glPointParameterfv(GL_POINT_DISTANCE_ATTENUATION, (0, 0, -1e-3)) ##glPointParameterfv(GL_POINT_SIZE_MAX, (65500,)) ##glPointParameterfv(GL_POINT_SIZE_MIN, (0,)) def paint(self): self.setupGLState() glEnable(GL_POINT_SPRITE) glActiveTexture(GL_TEXTURE0) glEnable( GL_TEXTURE_2D ) glBindTexture(GL_TEXTURE_2D, self.pointTexture) glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE) #glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE) ## use texture color exactly #glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE ) ## texture modulates current color glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR) glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE) glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE) glEnable(GL_PROGRAM_POINT_SIZE) with self.shader: #glUniform1i(self.shader.uniform('texture'), 0) ## inform the shader which texture to use glEnableClientState(GL_VERTEX_ARRAY) try: pos = self.pos #if pos.ndim > 2: #pos = pos.reshape((reduce(lambda a,b: a*b, pos.shape[:-1]), pos.shape[-1])) glVertexPointerf(pos) if isinstance(self.color, np.ndarray): glEnableClientState(GL_COLOR_ARRAY) glColorPointerf(self.color) else: if isinstance(self.color, QtGui.QColor): glColor4f(*fn.glColor(self.color)) else: glColor4f(*self.color) if not self.pxMode or isinstance(self.size, np.ndarray): glEnableClientState(GL_NORMAL_ARRAY) norm = np.empty(pos.shape) if self.pxMode: norm[...,0] = self.size else: gpos = self.mapToView(pos.transpose()).transpose() pxSize = self.view().pixelSize(gpos) norm[...,0] = self.size / pxSize glNormalPointerf(norm) else: glNormal3f(self.size, 0, 0) ## vertex shader uses norm.x to determine point size #glPointSize(self.size) glDrawArrays(GL_POINTS, 0, int(pos.size / pos.shape[-1])) finally: glDisableClientState(GL_NORMAL_ARRAY) glDisableClientState(GL_VERTEX_ARRAY) glDisableClientState(GL_COLOR_ARRAY) #posVBO.unbind() #for i in range(len(self.pos)): #pos = self.pos[i] #if isinstance(self.color, np.ndarray): #color = self.color[i] #else: #color = self.color #if isinstance(self.color, QtGui.QColor): #color = fn.glColor(self.color) #if isinstance(self.size, np.ndarray): #size = self.size[i] #else: #size = self.size #pxSize = self.view().pixelSize(QtGui.QVector3D(*pos)) #glPointSize(size / pxSize) #glBegin( GL_POINTS ) #glColor4f(*color) # x is blue ##glNormal3f(size, 0, 0) #glVertex3f(*pos) #glEnd()
mayblue9/scikit-learn
refs/heads/master
sklearn/preprocessing/tests/test_function_transformer.py
176
from nose.tools import assert_equal import numpy as np from sklearn.preprocessing import FunctionTransformer def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X): def _func(X, *args, **kwargs): args_store.append(X) args_store.extend(args) kwargs_store.update(kwargs) return func(X) return _func def test_delegate_to_func(): # (args|kwargs)_store will hold the positional and keyword arguments # passed to the function inside the FunctionTransformer. args_store = [] kwargs_store = {} X = np.arange(10).reshape((5, 2)) np.testing.assert_array_equal( FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X), X, 'transform should have returned X unchanged', ) # The function should only have recieved X. assert_equal( args_store, [X], 'Incorrect positional arguments passed to func: {args}'.format( args=args_store, ), ) assert_equal( kwargs_store, {}, 'Unexpected keyword arguments passed to func: {args}'.format( args=kwargs_store, ), ) # reset the argument stores. args_store[:] = [] # python2 compatible inplace list clear. kwargs_store.clear() y = object() np.testing.assert_array_equal( FunctionTransformer( _make_func(args_store, kwargs_store), pass_y=True, ).transform(X, y), X, 'transform should have returned X unchanged', ) # The function should have recieved X and y. assert_equal( args_store, [X, y], 'Incorrect positional arguments passed to func: {args}'.format( args=args_store, ), ) assert_equal( kwargs_store, {}, 'Unexpected keyword arguments passed to func: {args}'.format( args=kwargs_store, ), ) def test_np_log(): X = np.arange(10).reshape((5, 2)) # Test that the numpy.log example still works. np.testing.assert_array_equal( FunctionTransformer(np.log1p).transform(X), np.log1p(X), )
jcoady9/python-for-android
refs/heads/master
python-modules/twisted/twisted/conch/insults/colors.py
146
""" You don't really want to use this module. Try helper.py instead. """ CLEAR = 0 BOLD = 1 DIM = 2 ITALIC = 3 UNDERSCORE = 4 BLINK_SLOW = 5 BLINK_FAST = 6 REVERSE = 7 CONCEALED = 8 FG_BLACK = 30 FG_RED = 31 FG_GREEN = 32 FG_YELLOW = 33 FG_BLUE = 34 FG_MAGENTA = 35 FG_CYAN = 36 FG_WHITE = 37 BG_BLACK = 40 BG_RED = 41 BG_GREEN = 42 BG_YELLOW = 43 BG_BLUE = 44 BG_MAGENTA = 45 BG_CYAN = 46 BG_WHITE = 47
msebire/intellij-community
refs/heads/master
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_1/_pkg0_1_0_1_0/_mod0_1_0_1_0_4.py
30
name0_1_0_1_0_4_0 = None name0_1_0_1_0_4_1 = None name0_1_0_1_0_4_2 = None name0_1_0_1_0_4_3 = None name0_1_0_1_0_4_4 = None
leotada/pyrpg
refs/heads/master
sprite.py
1
#!/usr/bin/env python # -*- coding: utf-8 -*- # # sprite.py # # Copyright 2016 Leonardo <leonardo_tada@hotmail.com> # import pygame from gameobjects.vector2 import Vector2 class Sprite(): def __init__(self, game, image_name): self.game = game self.image = None self.image_name = image_name self.position = Vector2() def load(self): self.image = pygame.image.load(self.image_name).convert_alpha() def update(self): pass def draw(self): self.game.screen.blit(self.image, self.position.value)
837468220/python-for-android
refs/heads/master
python3-alpha/python3-src/Lib/test/ssl_servers.py
57
import os import sys import ssl import pprint import socket import urllib.parse # Rename HTTPServer to _HTTPServer so as to avoid confusion with HTTPSServer. from http.server import (HTTPServer as _HTTPServer, SimpleHTTPRequestHandler, BaseHTTPRequestHandler) from test import support threading = support.import_module("threading") here = os.path.dirname(__file__) HOST = support.HOST CERTFILE = os.path.join(here, 'keycert.pem') # This one's based on HTTPServer, which is based on SocketServer class HTTPSServer(_HTTPServer): def __init__(self, server_address, handler_class, context): _HTTPServer.__init__(self, server_address, handler_class) self.context = context def __str__(self): return ('<%s %s:%s>' % (self.__class__.__name__, self.server_name, self.server_port)) def get_request(self): # override this to wrap socket with SSL try: sock, addr = self.socket.accept() sslconn = self.context.wrap_socket(sock, server_side=True) except socket.error as e: # socket errors are silenced by the caller, print them here if support.verbose: sys.stderr.write("Got an error:\n%s\n" % e) raise return sslconn, addr class RootedHTTPRequestHandler(SimpleHTTPRequestHandler): # need to override translate_path to get a known root, # instead of using os.curdir, since the test could be # run from anywhere server_version = "TestHTTPS/1.0" root = here # Avoid hanging when a request gets interrupted by the client timeout = 5 def translate_path(self, path): """Translate a /-separated PATH to the local filename syntax. Components that mean special things to the local file system (e.g. drive or directory names) are ignored. (XXX They should probably be diagnosed.) """ # abandon query parameters path = urllib.parse.urlparse(path)[2] path = os.path.normpath(urllib.parse.unquote(path)) words = path.split('/') words = filter(None, words) path = self.root for word in words: drive, word = os.path.splitdrive(word) head, word = os.path.split(word) path = os.path.join(path, word) return path def log_message(self, format, *args): # we override this to suppress logging unless "verbose" if support.verbose: sys.stdout.write(" server (%s:%d %s):\n [%s] %s\n" % (self.server.server_address, self.server.server_port, self.request.cipher(), self.log_date_time_string(), format%args)) class StatsRequestHandler(BaseHTTPRequestHandler): """Example HTTP request handler which returns SSL statistics on GET requests. """ server_version = "StatsHTTPS/1.0" def do_GET(self, send_body=True): """Serve a GET request.""" sock = self.rfile.raw._sock context = sock.context body = pprint.pformat(context.session_stats()) body = body.encode('utf-8') self.send_response(200) self.send_header("Content-type", "text/plain; charset=utf-8") self.send_header("Content-Length", str(len(body))) self.end_headers() if send_body: self.wfile.write(body) def do_HEAD(self): """Serve a HEAD request.""" self.do_GET(send_body=False) def log_request(self, format, *args): if support.verbose: BaseHTTPRequestHandler.log_request(self, format, *args) class HTTPSServerThread(threading.Thread): def __init__(self, context, host=HOST, handler_class=None): self.flag = None self.server = HTTPSServer((host, 0), handler_class or RootedHTTPRequestHandler, context) self.port = self.server.server_port threading.Thread.__init__(self) self.daemon = True def __str__(self): return "<%s %s>" % (self.__class__.__name__, self.server) def start(self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): if self.flag: self.flag.set() try: self.server.serve_forever(0.05) finally: self.server.server_close() def stop(self): self.server.shutdown() def make_https_server(case, certfile=CERTFILE, host=HOST, handler_class=None): # we assume the certfile contains both private key and certificate context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.load_cert_chain(certfile) server = HTTPSServerThread(context, host, handler_class) flag = threading.Event() server.start(flag) flag.wait() def cleanup(): if support.verbose: sys.stdout.write('stopping HTTPS server\n') server.stop() if support.verbose: sys.stdout.write('joining HTTPS thread\n') server.join() case.addCleanup(cleanup) return server if __name__ == "__main__": import argparse parser = argparse.ArgumentParser( description='Run a test HTTPS server. ' 'By default, the current directory is served.') parser.add_argument('-p', '--port', type=int, default=4433, help='port to listen on (default: %(default)s)') parser.add_argument('-q', '--quiet', dest='verbose', default=True, action='store_false', help='be less verbose') parser.add_argument('-s', '--stats', dest='use_stats_handler', default=False, action='store_true', help='always return stats page') args = parser.parse_args() support.verbose = args.verbose if args.use_stats_handler: handler_class = StatsRequestHandler else: handler_class = RootedHTTPRequestHandler handler_class.root = os.getcwd() context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.load_cert_chain(CERTFILE) server = HTTPSServer(("", args.port), handler_class, context) if args.verbose: print("Listening on https://localhost:{0.port}".format(args)) server.serve_forever(0.1)
obtuse/ahmia
refs/heads/master
onionMongoBot/onionMongoBot/items.py
5
# -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/en/latest/topics/items.html from scrapy.item import Field, Item class CrawledWebsiteItem(Item): """A web site""" domain = Field() url = Field() title = Field() h1 = Field() h2 = Field() html = Field() words = Field()